| sample_id | text | metadata | category |
|---|---|---|---|
Comfy-Org/ComfyUI:comfy/ldm/chroma/model.py | #Original code can be found on: https://github.com/black-forest-labs/flux
from dataclasses import dataclass
import torch
from torch import Tensor, nn
from einops import rearrange, repeat
import comfy.patcher_extension
import comfy.ldm.common_dit
from comfy.ldm.flux.layers import (
EmbedND,
timestep_embedding,
DoubleStreamBlock,
SingleStreamBlock,
)
from .layers import (
LastLayer,
Approximator,
ChromaModulationOut,
)
@dataclass
class ChromaParams:
    """Hyperparameters describing a Chroma transformer instance.

    Passed as **kwargs to Chroma.__init__ and validated there
    (hidden_size must be divisible by num_heads, sum(axes_dim) must
    equal the per-head dimension).
    """
    in_channels: int          # channels of the (patchified) latent input
    out_channels: int         # channels of the predicted output
    context_in_dim: int       # dimension of the incoming text conditioning
    hidden_size: int          # transformer width
    mlp_ratio: float          # MLP expansion ratio inside the blocks
    num_heads: int            # attention heads; hidden_size must divide evenly
    depth: int                # number of double-stream blocks
    depth_single_blocks: int  # number of single-stream blocks
    axes_dim: list            # per-axis rotary embedding dims; must sum to hidden_size // num_heads
    theta: int                # rotary embedding base frequency
    patch_size: int           # spatial patch size used for (un)patchify
    qkv_bias: bool            # whether double-stream QKV projections use bias
    in_dim: int               # Approximator input dim (timestep+guidance+index embedding)
    out_dim: int              # Approximator output dim (one modulation vector width)
    hidden_dim: int           # Approximator hidden width
    n_layers: int             # Approximator depth
    txt_ids_dims: list        # unused here; kept for checkpoint/config compatibility — TODO confirm
    vec_in_dim: int           # unused here; kept for checkpoint/config compatibility — TODO confirm
class Chroma(nn.Module):
    """
    Transformer model for flow matching on sequences.

    Flux-derived double/single stream architecture where the blocks carry no
    internal modulation layers (modulation=False): a small Approximator
    network produces one flat tensor of modulation vectors up front, and
    get_modulations() slices the relevant pieces out per block.
    """
    def __init__(self, image_model=None, final_layer=True, dtype=None, device=None, operations=None, **kwargs):
        super().__init__()
        self.dtype = dtype
        params = ChromaParams(**kwargs)
        self.params = params
        self.patch_size = params.patch_size
        self.in_channels = params.in_channels
        self.out_channels = params.out_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(
                f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}"
            )
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.in_dim = params.in_dim
        self.out_dim = params.out_dim
        self.hidden_dim = params.hidden_dim
        self.n_layers = params.n_layers
        # Rotary position embedding over the concatenated (txt, img) id axes.
        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = operations.Linear(self.in_channels, self.hidden_size, bias=True, dtype=dtype, device=device)
        self.txt_in = operations.Linear(params.context_in_dim, self.hidden_size, dtype=dtype, device=device)
        # Produces the flat modulation tensor consumed by get_modulations().
        self.distilled_guidance_layer = Approximator(
                    in_dim=self.in_dim,
                    hidden_dim=self.hidden_dim,
                    out_dim=self.out_dim,
                    n_layers=self.n_layers,
                    dtype=dtype, device=device, operations=operations
                )
        # modulation=False: blocks receive externally computed modulation vectors.
        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                    modulation=False,
                    dtype=dtype, device=device, operations=operations
                )
                for _ in range(params.depth)
            ]
        )
        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio, modulation=False, dtype=dtype, device=device, operations=operations)
                for _ in range(params.depth_single_blocks)
            ]
        )
        if final_layer:
            self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels, dtype=dtype, device=device, operations=operations)
        # Block indices to skip; may be populated externally (e.g. by patches).
        self.skip_mmdit = []
        self.skip_dit = []
        self.lite = False
    def get_modulations(self, tensor: torch.Tensor, block_type: str, *, idx: int = 0):
        """Slice the per-block modulation vectors out of the flat tensor.

        This function slices up the modulations tensor which has the following layout:
            single     : num_single_blocks * 3 elements
            double_img : num_double_blocks * 6 elements
            double_txt : num_double_blocks * 6 elements
            final      : 2 elements

        Raises ValueError for an unknown block_type.
        """
        if block_type == "final":
            # Final layer consumes the last two vectors (shift, scale).
            return (tensor[:, -2:-1, :], tensor[:, -1:, :])
        single_block_count = self.params.depth_single_blocks
        double_block_count = self.params.depth
        offset = 3 * idx
        if block_type == "single":
            return ChromaModulationOut.from_offset(tensor, offset)
        # Double block modulations are 6 elements so we double 3 * idx.
        offset *= 2
        if block_type in {"double_img", "double_txt"}:
            # Advance past the single block modulations.
            offset += 3 * single_block_count
            if block_type == "double_txt":
                # Advance past the double block img modulations.
                offset += 6 * double_block_count
            return (
                ChromaModulationOut.from_offset(tensor, offset),
                ChromaModulationOut.from_offset(tensor, offset + 3),
            )
        raise ValueError("Bad block_type")
    def forward_orig(
        self,
        img: Tensor,
        img_ids: Tensor,
        txt: Tensor,
        txt_ids: Tensor,
        timesteps: Tensor,
        guidance: Tensor = None,
        control = None,
        transformer_options={},
        attn_mask: Tensor = None,
    ) -> Tensor:
        """Core forward pass on already-patchified sequences.

        img/txt are (batch, seq, features) token sequences; img_ids/txt_ids
        are their positional ids for the rotary embedder. Returns the image
        token sequence after the final layer (when present).
        """
        transformer_options = transformer_options.copy()
        patches_replace = transformer_options.get("patches_replace", {})
        # running on sequences img
        img = self.img_in(img)
        # distilled vector guidance
        # NOTE(review): 344 is the total modulation vector count; presumably
        # 3 * depth_single_blocks + 12 * depth + 2 for the shipped config — TODO confirm.
        mod_index_length = 344
        distill_timestep = timestep_embedding(timesteps.detach().clone(), 16).to(img.device, img.dtype)
        distil_guidance = timestep_embedding(guidance.detach().clone(), 16).to(img.device, img.dtype)
        # get all modulation index
        modulation_index = timestep_embedding(torch.arange(mod_index_length, device=img.device), 32).to(img.device, img.dtype)
        # we need to broadcast the modulation index here so each batch has all of the index
        modulation_index = modulation_index.unsqueeze(0).repeat(img.shape[0], 1, 1).to(img.device, img.dtype)
        # and we need to broadcast timestep and guidance along too
        timestep_guidance = torch.cat([distill_timestep, distil_guidance], dim=1).unsqueeze(1).repeat(1, mod_index_length, 1).to(img.dtype).to(img.device, img.dtype)
        # then and only then we could concatenate it together
        input_vec = torch.cat([timestep_guidance, modulation_index], dim=-1).to(img.device, img.dtype)
        mod_vectors = self.distilled_guidance_layer(input_vec)
        txt = self.txt_in(txt)
        ids = torch.cat((txt_ids, img_ids), dim=1)
        pe = self.pe_embedder(ids)
        blocks_replace = patches_replace.get("dit", {})
        transformer_options["total_blocks"] = len(self.double_blocks)
        transformer_options["block_type"] = "double"
        for i, block in enumerate(self.double_blocks):
            transformer_options["block_index"] = i
            if i not in self.skip_mmdit:
                double_mod = (
                    self.get_modulations(mod_vectors, "double_img", idx=i),
                    self.get_modulations(mod_vectors, "double_txt", idx=i),
                )
                if ("double_block", i) in blocks_replace:
                    def block_wrap(args):
                        out = {}
                        out["img"], out["txt"] = block(img=args["img"],
                                                       txt=args["txt"],
                                                       vec=args["vec"],
                                                       pe=args["pe"],
                                                       attn_mask=args.get("attn_mask"),
                                                       transformer_options=args.get("transformer_options"))
                        return out
                    out = blocks_replace[("double_block", i)]({"img": img,
                                                              "txt": txt,
                                                              "vec": double_mod,
                                                              "pe": pe,
                                                              "attn_mask": attn_mask,
                                                              "transformer_options": transformer_options},
                                                             {"original_block": block_wrap})
                    txt = out["txt"]
                    img = out["img"]
                else:
                    img, txt = block(img=img,
                                     txt=txt,
                                     vec=double_mod,
                                     pe=pe,
                                     attn_mask=attn_mask,
                                     transformer_options=transformer_options)
                if control is not None: # Controlnet
                    control_i = control.get("input")
                    if i < len(control_i):
                        add = control_i[i]
                        if add is not None:
                            img += add
        # Single-stream blocks operate on the concatenated (txt, img) sequence.
        img = torch.cat((txt, img), 1)
        transformer_options["total_blocks"] = len(self.single_blocks)
        transformer_options["block_type"] = "single"
        transformer_options["img_slice"] = [txt.shape[1], img.shape[1]]
        for i, block in enumerate(self.single_blocks):
            transformer_options["block_index"] = i
            if i not in self.skip_dit:
                single_mod = self.get_modulations(mod_vectors, "single", idx=i)
                if ("single_block", i) in blocks_replace:
                    def block_wrap(args):
                        out = {}
                        out["img"] = block(args["img"],
                                           vec=args["vec"],
                                           pe=args["pe"],
                                           attn_mask=args.get("attn_mask"),
                                           transformer_options=args.get("transformer_options"))
                        return out
                    out = blocks_replace[("single_block", i)]({"img": img,
                                                              "vec": single_mod,
                                                              "pe": pe,
                                                              "attn_mask": attn_mask,
                                                              "transformer_options": transformer_options},
                                                             {"original_block": block_wrap})
                    img = out["img"]
                else:
                    img = block(img, vec=single_mod, pe=pe, attn_mask=attn_mask, transformer_options=transformer_options)
                if control is not None: # Controlnet
                    control_o = control.get("output")
                    if i < len(control_o):
                        add = control_o[i]
                        if add is not None:
                            # Only the image part of the joint sequence gets the residual.
                            img[:, txt.shape[1] :, ...] += add
        # Drop the text tokens again; only image tokens go to the final layer.
        img = img[:, txt.shape[1] :, ...]
        if hasattr(self, "final_layer"):
            final_mod = self.get_modulations(mod_vectors, "final")
            img = self.final_layer(img, vec=final_mod) # (N, T, patch_size ** 2 * out_channels)
        return img
    def forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs):
        """Public entry point: runs _forward through any registered DIFFUSION_MODEL wrappers."""
        return comfy.patcher_extension.WrapperExecutor.new_class_executor(
            self._forward,
            self,
            comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.DIFFUSION_MODEL, transformer_options)
        ).execute(x, timestep, context, guidance, control, transformer_options, **kwargs)
    def _forward(self, x, timestep, context, guidance, control=None, transformer_options={}, **kwargs):
        """Patchify a (b, c, h, w) latent, run forward_orig, and unpatchify back.

        Builds per-patch positional ids (row index in channel 1, column index
        in channel 2) and zero ids for the text tokens, then crops the output
        back to the original (h, w) in case padding was added.
        """
        bs, c, h, w = x.shape
        x = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size))
        img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=self.patch_size, pw=self.patch_size)
        if img.ndim != 3 or context.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")
        # Rounded-up patch-grid extents (matches the padding applied above).
        h_len = ((h + (self.patch_size // 2)) // self.patch_size)
        w_len = ((w + (self.patch_size // 2)) // self.patch_size)
        img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
        img_ids[:, :, 1] = img_ids[:, :, 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype).unsqueeze(1)
        img_ids[:, :, 2] = img_ids[:, :, 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype).unsqueeze(0)
        img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
        txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
        out = self.forward_orig(img, img_ids, context, txt_ids, timestep, guidance, control, transformer_options, attn_mask=kwargs.get("attention_mask", None))
        return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=self.patch_size, pw=self.patch_size)[:,:,:h,:w]
| {
"repo_id": "Comfy-Org/ComfyUI",
"file_path": "comfy/ldm/chroma/model.py",
"license": "GNU General Public License v3.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Comfy-Org/ComfyUI:comfy_api/input/basic_types.py | # This file only exists for backwards compatibility.
from comfy_api.latest._input.basic_types import (
ImageInput,
AudioInput,
MaskInput,
LatentInput,
)
__all__ = [
"ImageInput",
"AudioInput",
"MaskInput",
"LatentInput",
]
| {
"repo_id": "Comfy-Org/ComfyUI",
"file_path": "comfy_api/input/basic_types.py",
"license": "GNU General Public License v3.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Comfy-Org/ComfyUI:comfy_api/input/video_types.py | # This file only exists for backwards compatibility.
from comfy_api.latest._input.video_types import VideoInput
__all__ = [
"VideoInput",
]
| {
"repo_id": "Comfy-Org/ComfyUI",
"file_path": "comfy_api/input/video_types.py",
"license": "GNU General Public License v3.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Comfy-Org/ComfyUI:comfy_api/util/video_types.py | # This file only exists for backwards compatibility.
from comfy_api.latest._util.video_types import (
VideoContainer,
VideoCodec,
VideoComponents,
)
__all__ = [
"VideoContainer",
"VideoCodec",
"VideoComponents",
]
| {
"repo_id": "Comfy-Org/ComfyUI",
"file_path": "comfy_api/util/video_types.py",
"license": "GNU General Public License v3.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/engines/toolbelt/proxy_rotation.py | from threading import Lock
from scrapling.core._types import Callable, Dict, List, Tuple, ProxyType
RotationStrategy = Callable[[List[ProxyType], int], Tuple[ProxyType, int]]
_PROXY_ERROR_INDICATORS = {
"net::err_proxy",
"net::err_tunnel",
"connection refused",
"connection reset",
"connection timed out",
"failed to connect",
"could not resolve proxy",
}
def _get_proxy_key(proxy: ProxyType) -> str:
    """Generate a unique key for a proxy (for dicts it's server plus username)."""
    if not isinstance(proxy, str):
        server_part = proxy.get("server", "")
        user_part = proxy.get("username", "")
        return f"{server_part}|{user_part}"
    return proxy
def is_proxy_error(error: Exception) -> bool:
    """Check if an error is proxy-related. Works for both HTTP and browser errors."""
    message = str(error).lower()
    for indicator in _PROXY_ERROR_INDICATORS:
        if indicator in message:
            return True
    return False
def cyclic_rotation(proxies: List[ProxyType], current_index: int) -> Tuple[ProxyType, int]:
    """Default cyclic rotation strategy — iterates through proxies sequentially, wrapping around at the end."""
    total = len(proxies)
    position = current_index % total
    next_position = (position + 1) % total
    return proxies[position], next_position
class ProxyRotator:
    """
    A thread-safe proxy rotator with pluggable rotation strategies.

    Supports:
    - Cyclic rotation (default)
    - Custom rotation strategies via callable
    - Both string URLs and Playwright-style dict proxies
    """

    __slots__ = ("_proxies", "_proxy_to_index", "_strategy", "_current_index", "_lock")

    def __init__(
        self,
        proxies: List[ProxyType],
        strategy: RotationStrategy = cyclic_rotation,
    ):
        """
        Initialize the proxy rotator.

        :param proxies: List of proxy URLs or Playwright-style proxy dicts.
            - String format: "http://proxy1:8080" or "http://user:pass@proxy:8080"
            - Dict format: {"server": "http://proxy:8080", "username": "user", "password": "pass"}
        :param strategy: Rotation strategy function. Takes (proxies, current_index) and returns (proxy, next_index). Defaults to cyclic_rotation.
        """
        if not proxies:
            raise ValueError("At least one proxy must be provided")
        if not callable(strategy):
            raise TypeError(f"strategy must be callable, got {type(strategy).__name__}")
        self._strategy = strategy
        self._lock = Lock()
        # Validated proxies plus an O(1) lookup keyed by server + username.
        self._proxies: List[ProxyType] = []
        self._proxy_to_index: Dict[str, int] = {}
        for position, candidate in enumerate(proxies):
            if not isinstance(candidate, (str, dict)):
                raise TypeError(f"Invalid proxy type: {type(candidate)}. Expected str or dict.")
            if isinstance(candidate, dict) and "server" not in candidate:
                raise ValueError("Proxy dict must have a 'server' key")
            self._proxy_to_index[_get_proxy_key(candidate)] = position
            self._proxies.append(candidate)
        self._current_index = 0

    def get_proxy(self) -> ProxyType:
        """Get the next proxy according to the rotation strategy."""
        with self._lock:
            chosen, next_index = self._strategy(self._proxies, self._current_index)
            self._current_index = next_index
        return chosen

    @property
    def proxies(self) -> List[ProxyType]:
        """Get a copy of all configured proxies."""
        return list(self._proxies)

    def __len__(self) -> int:
        """Return the total number of configured proxies."""
        return len(self._proxies)

    def __repr__(self) -> str:
        return f"ProxyRotator(proxies={len(self._proxies)})"
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/toolbelt/proxy_rotation.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/spiders/checkpoint.py | import pickle
from pathlib import Path
from dataclasses import dataclass, field
import anyio
from anyio import Path as AsyncPath
from scrapling.core.utils import log
from scrapling.core._types import Set, List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from scrapling.spiders.request import Request
@dataclass
class CheckpointData:
    """Container for checkpoint state."""
    # Requests still pending in the scheduler when the checkpoint was taken.
    requests: List["Request"] = field(default_factory=list)
    # Fingerprints of already-seen requests, used for dedup after resume.
    seen: Set[bytes] = field(default_factory=set)
class CheckpointManager:
    """Manages saving and loading checkpoint state to/from disk."""

    CHECKPOINT_FILE = "checkpoint.pkl"

    def __init__(self, crawldir: str | Path | AsyncPath, interval: float = 300.0):
        self.crawldir = AsyncPath(crawldir)
        self._checkpoint_path = self.crawldir / self.CHECKPOINT_FILE
        self.interval = interval
        if not isinstance(interval, (int, float)):
            raise TypeError("Checkpoints interval must be integer or float.")
        if interval < 0:
            raise ValueError("Checkpoints interval must be equal or greater than 0.")

    async def has_checkpoint(self) -> bool:
        """Check if a checkpoint exists."""
        exists = await self._checkpoint_path.exists()
        return exists

    async def save(self, data: CheckpointData) -> None:
        """Save checkpoint data to disk atomically."""
        await self.crawldir.mkdir(parents=True, exist_ok=True)
        tmp = self._checkpoint_path.with_suffix(".tmp")
        try:
            payload = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
            async with await anyio.open_file(tmp, "wb") as fh:
                await fh.write(payload)
            # Rename over the real path so readers never see a partial file.
            await tmp.rename(self._checkpoint_path)
            log.info(f"Checkpoint saved: {len(data.requests)} requests, {len(data.seen)} seen URLs")
        except Exception as e:
            # Remove the partially-written temp file before propagating.
            if await tmp.exists():
                await tmp.unlink()
            log.error(f"Failed to save checkpoint: {e}")
            raise

    async def load(self) -> Optional[CheckpointData]:
        """Load checkpoint data from disk.

        Returns None if no checkpoint exists or if loading fails.
        """
        if not await self.has_checkpoint():
            return None
        try:
            async with await anyio.open_file(self._checkpoint_path, "rb") as fh:
                raw = await fh.read()
            restored: CheckpointData = pickle.loads(raw)
            log.info(f"Checkpoint loaded: {len(restored.requests)} requests, {len(restored.seen)} seen URLs")
            return restored
        except Exception as e:
            log.error(f"Failed to load checkpoint (starting fresh): {e}")
            return None

    async def cleanup(self) -> None:
        """Delete checkpoint file after successful completion."""
        try:
            if await self._checkpoint_path.exists():
                await self._checkpoint_path.unlink()
                log.debug("Checkpoint file cleaned up")
        except Exception as e:
            log.warning(f"Failed to cleanup checkpoint file: {e}")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/checkpoint.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/spiders/engine.py | import json
import pprint
from pathlib import Path
import anyio
from anyio import Path as AsyncPath
from anyio import create_task_group, CapacityLimiter, create_memory_object_stream, EndOfStream
from scrapling.core.utils import log
from scrapling.spiders.request import Request
from scrapling.spiders.scheduler import Scheduler
from scrapling.spiders.session import SessionManager
from scrapling.spiders.result import CrawlStats, ItemList
from scrapling.spiders.checkpoint import CheckpointManager, CheckpointData
from scrapling.core._types import Dict, Union, Optional, TYPE_CHECKING, Any, AsyncGenerator
if TYPE_CHECKING:
from scrapling.spiders.spider import Spider
def _dump(obj: Dict) -> str:
    """Render a dict as a 4-space-indented JSON string (for human-readable logs)."""
    rendered = json.dumps(obj, indent=4)
    return rendered
class CrawlerEngine:
    """Orchestrates the crawling process."""

    def __init__(
        self,
        spider: "Spider",
        session_manager: SessionManager,
        crawldir: Optional[Union[str, Path, AsyncPath]] = None,
        interval: float = 300.0,
    ):
        """Initialize the engine.

        :param spider: Spider instance providing callbacks and settings.
        :param session_manager: Manages fetch sessions used to download requests.
        :param crawldir: Directory for checkpoint files; truthy value enables checkpointing.
        :param interval: Seconds between periodic checkpoints (0 disables periodic saves).
        """
        self.spider = spider
        self.session_manager = session_manager
        self.scheduler = Scheduler(
            include_kwargs=spider.fp_include_kwargs,
            include_headers=spider.fp_include_headers,
            keep_fragments=spider.fp_keep_fragments,
        )
        self.stats = CrawlStats()
        self._global_limiter = CapacityLimiter(spider.concurrent_requests)
        self._domain_limiters: dict[str, CapacityLimiter] = {}
        self._allowed_domains: set[str] = spider.allowed_domains or set()
        self._active_tasks: int = 0
        self._running: bool = False
        self._items: ItemList = ItemList()
        self._item_stream: Any = None
        self._checkpoint_system_enabled = bool(crawldir)
        self._checkpoint_manager = CheckpointManager(crawldir or "", interval)
        self._last_checkpoint_time: float = 0.0
        self._pause_requested: bool = False
        self._force_stop: bool = False
        self.paused: bool = False

    def _is_domain_allowed(self, request: Request) -> bool:
        """Check if the request's domain is in allowed_domains (subdomains included)."""
        if not self._allowed_domains:
            return True
        domain = request.domain
        for allowed in self._allowed_domains:
            if domain == allowed or domain.endswith("." + allowed):
                return True
        return False

    def _rate_limiter(self, domain: str) -> CapacityLimiter:
        """Get or create a per-domain concurrency limiter if enabled, otherwise use the global limiter."""
        if self.spider.concurrent_requests_per_domain:
            if domain not in self._domain_limiters:
                self._domain_limiters[domain] = CapacityLimiter(self.spider.concurrent_requests_per_domain)
            return self._domain_limiters[domain]
        return self._global_limiter

    def _normalize_request(self, request: Request) -> None:
        """Normalize request fields before enqueueing.

        Resolves empty sid to the session manager's default session ID.
        This ensures consistent fingerprinting for requests using the same session.
        """
        if not request.sid:
            request.sid = self.session_manager.default_session_id

    async def _process_request(self, request: Request) -> None:
        """Download a single request, run its callback, and route the results."""
        async with self._rate_limiter(request.domain):
            if self.spider.download_delay:
                await anyio.sleep(self.spider.download_delay)
            # Record which proxies were actually used, for the final stats.
            if request._session_kwargs.get("proxy"):
                self.stats.proxies.append(request._session_kwargs["proxy"])
            if request._session_kwargs.get("proxies"):
                self.stats.proxies.append(dict(request._session_kwargs["proxies"]))
            try:
                response = await self.session_manager.fetch(request)
                self.stats.increment_requests_count(request.sid or self.session_manager.default_session_id)
                self.stats.increment_response_bytes(request.domain, len(response.body))
                self.stats.increment_status(response.status)
            except Exception as e:
                self.stats.failed_requests_count += 1
                await self.spider.on_error(request, e)
                return
            if await self.spider.is_blocked(response):
                self.stats.blocked_requests_count += 1
                if request._retry_count < self.spider.max_blocked_retries:
                    retry_request = request.copy()
                    retry_request._retry_count += 1
                    retry_request.priority -= 1  # Don't retry immediately
                    retry_request.dont_filter = True
                    # Drop the (possibly burnt) proxy so the retry can pick a new one.
                    retry_request._session_kwargs.pop("proxy", None)
                    retry_request._session_kwargs.pop("proxies", None)
                    new_request = await self.spider.retry_blocked_request(retry_request, response)
                    self._normalize_request(new_request)
                    await self.scheduler.enqueue(new_request)
                    log.info(
                        f"Scheduled blocked request for retry ({retry_request._retry_count}/{self.spider.max_blocked_retries}): {request.url}"
                    )
                else:
                    log.warning(f"Max retries exceeded for blocked request: {request.url}")
                return
            callback = request.callback if request.callback else self.spider.parse
            try:
                async for result in callback(response):
                    if isinstance(result, Request):
                        if self._is_domain_allowed(result):
                            self._normalize_request(result)
                            await self.scheduler.enqueue(result)
                        else:
                            self.stats.offsite_requests_count += 1
                            log.debug(f"Filtered offsite request to: {result.url}")
                    elif isinstance(result, dict):
                        processed_result = await self.spider.on_scraped_item(result)
                        if processed_result:
                            self.stats.items_scraped += 1
                            log.debug(f"Scraped from {str(response)}\n{pprint.pformat(processed_result)}")
                            if self._item_stream:
                                await self._item_stream.send(processed_result)
                            else:
                                self._items.append(processed_result)
                        else:
                            self.stats.items_dropped += 1
                            log.warning(f"Dropped from {str(response)}\n{processed_result}")
                    elif result is not None:
                        log.error(f"Spider must return Request, dict or None, got '{type(result)}' in {request}")
            except Exception as e:
                msg = f"Spider error processing {request}:\n {e}"
                log.error(msg, exc_info=e)
                await self.spider.on_error(request, e)

    async def _task_wrapper(self, request: Request) -> None:
        """Wrapper to track active task count."""
        try:
            await self._process_request(request)
        finally:
            self._active_tasks -= 1

    def request_pause(self) -> None:
        """Request a graceful pause of the crawl.

        First call: requests graceful pause (waits for active tasks).
        Second call: forces immediate stop.
        """
        if self._force_stop:
            return  # Already forcing stop
        if self._pause_requested:
            # Second Ctrl+C - force stop
            self._force_stop = True
            log.warning("Force stop requested, cancelling immediately...")
        else:
            self._pause_requested = True
            log.info(
                "Pause requested, waiting for in-flight requests to complete (press Ctrl+C again to force stop)..."
            )

    async def _save_checkpoint(self) -> None:
        """Save current state to checkpoint files."""
        requests, seen = self.scheduler.snapshot()
        data = CheckpointData(requests=requests, seen=seen)
        await self._checkpoint_manager.save(data)
        self._last_checkpoint_time = anyio.current_time()

    def _is_checkpoint_time(self) -> bool:
        """Check if it's time for the periodic checkpoint."""
        if not self._checkpoint_system_enabled:
            return False
        if self._checkpoint_manager.interval == 0:
            return False
        current_time = anyio.current_time()
        return (current_time - self._last_checkpoint_time) >= self._checkpoint_manager.interval

    async def _restore_from_checkpoint(self) -> bool:
        """Attempt to restore state from checkpoint.

        Returns True if successfully restored, False otherwise.
        """
        if not self._checkpoint_system_enabled:
            # Fix: this was a bare `raise` with no active exception, which would
            # itself crash with RuntimeError if ever reached. With checkpointing
            # disabled there is simply nothing to restore.
            return False
        data = await self._checkpoint_manager.load()
        if data is None:
            return False
        self.scheduler.restore(data)
        # Restore callbacks from spider after scheduler restore
        for request in data.requests:
            request._restore_callback(self.spider)
        return True

    async def crawl(self) -> CrawlStats:
        """Run the spider and return CrawlStats."""
        self._running = True
        self._items.clear()
        self.paused = False
        self._pause_requested = False
        self._force_stop = False
        self.stats = CrawlStats(start_time=anyio.current_time())
        # Check for existing checkpoint
        resuming = (await self._restore_from_checkpoint()) if self._checkpoint_system_enabled else False
        self._last_checkpoint_time = anyio.current_time()
        async with self.session_manager:
            self.stats.concurrent_requests = self.spider.concurrent_requests
            self.stats.concurrent_requests_per_domain = self.spider.concurrent_requests_per_domain
            self.stats.download_delay = self.spider.download_delay
            await self.spider.on_start(resuming=resuming)
            try:
                if not resuming:
                    async for request in self.spider.start_requests():
                        self._normalize_request(request)
                        await self.scheduler.enqueue(request)
                else:
                    log.info("Resuming from checkpoint, skipping start_requests()")
                # Process queue
                async with create_task_group() as tg:
                    while self._running:
                        if self._pause_requested:
                            if self._active_tasks == 0 or self._force_stop:
                                if self._force_stop:
                                    log.warning(f"Force stopping with {self._active_tasks} active tasks")
                                    tg.cancel_scope.cancel()
                                # Only save checkpoint if checkpoint system is enabled
                                if self._checkpoint_system_enabled:
                                    await self._save_checkpoint()
                                    self.paused = True
                                    log.info("Spider paused, checkpoint saved")
                                else:
                                    log.info("Spider stopped gracefully")
                                self._running = False
                                break
                            # Wait briefly and check again
                            await anyio.sleep(0.05)
                            continue
                        if self._checkpoint_system_enabled and self._is_checkpoint_time():
                            await self._save_checkpoint()
                        if self.scheduler.is_empty:
                            # Empty queue + no active tasks = done
                            if self._active_tasks == 0:
                                self._running = False
                                log.debug("Spider idle")
                                break
                            # Brief wait for callbacks to enqueue new requests
                            await anyio.sleep(0.05)
                            continue
                        # Only spawn tasks up to concurrent_requests limit
                        # This prevents spawning thousands of waiting tasks
                        if self._active_tasks >= self.spider.concurrent_requests:
                            await anyio.sleep(0.01)
                            continue
                        request = await self.scheduler.dequeue()
                        self._active_tasks += 1
                        tg.start_soon(self._task_wrapper, request)
            finally:
                await self.spider.on_close()
                # Clean up checkpoint files on successful completion (not paused)
                if not self.paused and self._checkpoint_system_enabled:
                    await self._checkpoint_manager.cleanup()
        self.stats.log_levels_counter = self.spider._log_counter.get_counts()
        self.stats.end_time = anyio.current_time()
        log.info(_dump(self.stats.to_dict()))
        return self.stats

    @property
    def items(self) -> ItemList:
        """Access scraped items."""
        return self._items

    def __aiter__(self) -> AsyncGenerator[dict, None]:
        return self._stream()

    async def _stream(self) -> AsyncGenerator[dict, None]:
        """Async generator that runs crawl and yields items."""
        send, recv = create_memory_object_stream[dict](100)
        self._item_stream = send

        async def run():
            try:
                await self.crawl()
            finally:
                await send.aclose()

        async with create_task_group() as tg:
            tg.start_soon(run)
            try:
                async for item in recv:
                    yield item
            except EndOfStream:
                pass
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/engine.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/spiders/request.py | import hashlib
from io import BytesIO
from functools import cached_property
from urllib.parse import urlparse, urlencode
import orjson
from w3lib.url import canonicalize_url
from scrapling.engines.toolbelt.custom import Response
from scrapling.core._types import Any, AsyncGenerator, Callable, Dict, Optional, Union, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from scrapling.spiders.spider import Spider
def _convert_to_bytes(value: str | bytes) -> bytes:
    """Encode str to UTF-8 bytes (ignoring errors); pass bytes through unchanged."""
    if isinstance(value, str):
        return value.encode(encoding="utf-8", errors="ignore")
    if not isinstance(value, bytes):
        raise TypeError(f"Can't convert {type(value).__name__} to bytes")
    return value
class Request:
def __init__(
self,
url: str,
sid: str = "",
callback: Callable[[Response], AsyncGenerator[Union[Dict[str, Any], "Request", None], None]] | None = None,
priority: int = 0,
dont_filter: bool = False,
meta: dict[str, Any] | None = None,
_retry_count: int = 0,
**kwargs: Any,
) -> None:
self.url: str = url
self.sid: str = sid
self.callback = callback
self.priority: int = priority
self.dont_filter: bool = dont_filter
self.meta: dict[str, Any] = meta if meta else {}
self._retry_count: int = _retry_count
self._session_kwargs = kwargs if kwargs else {}
self._fp: Optional[bytes] = None
def copy(self) -> "Request":
"""Create a copy of this request."""
return Request(
url=self.url,
sid=self.sid,
callback=self.callback,
priority=self.priority,
dont_filter=self.dont_filter,
meta=self.meta.copy(),
_retry_count=self._retry_count,
**self._session_kwargs,
)
@cached_property
def domain(self) -> str:
return urlparse(self.url).netloc
def update_fingerprint(
self,
include_kwargs: bool = False,
include_headers: bool = False,
keep_fragments: bool = False,
) -> bytes:
"""Generate a unique fingerprint for deduplication.
Caches the result in self._fp after first computation.
"""
if self._fp is not None:
return self._fp
post_data = self._session_kwargs.get("data", {})
body = b""
if post_data:
if isinstance(post_data, dict | list | tuple):
body = urlencode(post_data).encode()
elif isinstance(post_data, str):
body = post_data.encode()
elif isinstance(post_data, BytesIO):
body = post_data.getvalue()
elif isinstance(post_data, bytes):
body = post_data
else:
post_data = self._session_kwargs.get("json", {})
body = orjson.dumps(post_data) if post_data else b""
data: Dict[str, str | Tuple] = {
"sid": self.sid,
"body": body.hex(),
"method": self._session_kwargs.get("method", "GET"),
"url": canonicalize_url(self.url, keep_fragments=keep_fragments),
}
if include_kwargs:
kwargs = (key.lower() for key in self._session_kwargs.keys() if key.lower() not in ("data", "json"))
data["kwargs"] = "".join(set(_convert_to_bytes(key).hex() for key in kwargs))
if include_headers:
headers = self._session_kwargs.get("headers") or self._session_kwargs.get("extra_headers") or {}
processed_headers = {}
# Some header normalization
for key, value in headers.items():
processed_headers[_convert_to_bytes(key.lower()).hex()] = _convert_to_bytes(value.lower()).hex()
data["headers"] = tuple(processed_headers.items())
fp = hashlib.sha1(orjson.dumps(data, option=orjson.OPT_SORT_KEYS), usedforsecurity=False).digest()
self._fp = fp
return fp
def __repr__(self) -> str:
callback_name = getattr(self.callback, "__name__", None) or "None"
return f"<Request({self.url}) priority={self.priority} callback={callback_name}>"
def __str__(self) -> str:
    """Plain-string form of the request: just its URL."""
    return self.url
def __lt__(self, other: object) -> bool:
    """Order requests by ascending priority."""
    if isinstance(other, Request):
        return self.priority < other.priority
    return NotImplemented
def __gt__(self, other: object) -> bool:
    """Order requests by descending priority."""
    if isinstance(other, Request):
        return self.priority > other.priority
    return NotImplemented
def __eq__(self, other: object) -> bool:
    """Two requests are equal when their fingerprints match.

    :raises RuntimeError: If either fingerprint has not been generated yet.
    """
    if not isinstance(other, Request):
        return NotImplemented
    if self._fp is None or other._fp is None:
        raise RuntimeError("Cannot compare requests before generating their fingerprints!")
    # Fingerprints are bytes, so this is a plain byte-wise comparison.
    return self._fp == other._fp
def __getstate__(self) -> dict[str, Any]:
    """Prepare state for pickling; the callback is stored by name only."""
    state = dict(self.__dict__)
    cb = self.callback
    state["_callback_name"] = getattr(cb, "__name__", None) if cb is not None else None
    state["callback"] = None  # Callables are not reliably picklable
    return state
def __setstate__(self, state: dict[str, Any]) -> None:
    """Restore pickled state; the callback is re-bound later via _restore_callback()."""
    stored_name = state.pop("_callback_name", None)
    self.__dict__.update(state)
    self._callback_name: str | None = stored_name
def _restore_callback(self, spider: "Spider") -> None:
    """Re-bind the callback from the spider after unpickling.

    Falls back to ``spider.parse`` when the stored name is missing on the spider.

    :param spider: Spider instance to look up the callback method on
    """
    stored = getattr(self, "_callback_name", None)
    if stored:
        self.callback = getattr(spider, stored, None) or spider.parse
    # Drop the transient name attribute whether or not it held a value.
    if hasattr(self, "_callback_name"):
        del self._callback_name
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/request.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/spiders/result.py | from pathlib import Path
from dataclasses import dataclass, field
import orjson
from scrapling.core.utils import log
from scrapling.core._types import Any, Iterator, Dict, List, Tuple, Union
class ItemList(list):
    """A list subclass for scraped items that can export itself to disk."""

    def to_json(self, path: Union[str, Path], *, indent: bool = False):
        """Export items to a JSON file.

        :param path: Path to the output file
        :param indent: Pretty-print with 2-space indentation (slightly slower)
        """
        opts = orjson.OPT_SERIALIZE_NUMPY | (orjson.OPT_INDENT_2 if indent else 0)
        target = Path(path)
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_bytes(orjson.dumps(list(self), option=opts))
        log.info("Saved %d items to %s", len(self), path)

    def to_jsonl(self, path: Union[str, Path]):
        """Export items as JSON Lines (one JSON object per line).

        :param path: Path to the output file
        """
        Path(path).parent.mkdir(parents=True, exist_ok=True)
        with open(path, "wb") as out:
            for entry in self:
                out.write(orjson.dumps(entry, option=orjson.OPT_SERIALIZE_NUMPY) + b"\n")
        log.info("Saved %d items to %s", len(self), path)
@dataclass
class CrawlStats:
"""Statistics for a crawl run."""
requests_count: int = 0
concurrent_requests: int = 0
concurrent_requests_per_domain: int = 0
failed_requests_count: int = 0
offsite_requests_count: int = 0
response_bytes: int = 0
items_scraped: int = 0
items_dropped: int = 0
start_time: float = 0.0
end_time: float = 0.0
download_delay: float = 0.0
blocked_requests_count: int = 0
custom_stats: Dict = field(default_factory=dict)
response_status_count: Dict = field(default_factory=dict)
domains_response_bytes: Dict = field(default_factory=dict)
sessions_requests_count: Dict = field(default_factory=dict)
proxies: List[str | Dict | Tuple] = field(default_factory=list)
log_levels_counter: Dict = field(default_factory=dict)
@property
def elapsed_seconds(self) -> float:
return self.end_time - self.start_time
@property
def requests_per_second(self) -> float:
if self.elapsed_seconds == 0:
return 0.0
return self.requests_count / self.elapsed_seconds
def increment_status(self, status: int) -> None:
self.response_status_count[f"status_{status}"] = self.response_status_count.get(f"status_{status}", 0) + 1
def increment_response_bytes(self, domain: str, count: int) -> None:
self.response_bytes += count
self.domains_response_bytes[domain] = self.domains_response_bytes.get(domain, 0) + count
def increment_requests_count(self, sid: str) -> None:
self.requests_count += 1
self.sessions_requests_count[sid] = self.sessions_requests_count.get(sid, 0) + 1
def to_dict(self) -> dict[str, Any]:
return {
"items_scraped": self.items_scraped,
"items_dropped": self.items_dropped,
"elapsed_seconds": round(self.elapsed_seconds, 2),
"download_delay": round(self.download_delay, 2),
"concurrent_requests": self.concurrent_requests,
"concurrent_requests_per_domain": self.concurrent_requests_per_domain,
"requests_count": self.requests_count,
"requests_per_second": round(self.requests_per_second, 2),
"sessions_requests_count": self.sessions_requests_count,
"failed_requests_count": self.failed_requests_count,
"offsite_requests_count": self.offsite_requests_count,
"blocked_requests_count": self.blocked_requests_count,
"response_status_count": self.response_status_count,
"response_bytes": self.response_bytes,
"domains_response_bytes": self.domains_response_bytes,
"proxies": self.proxies,
"custom_stats": self.custom_stats,
"log_count": self.log_levels_counter,
}
@dataclass
class CrawlResult:
    """Everything produced by one spider run: stats, scraped items, pause state."""

    stats: CrawlStats
    items: ItemList
    paused: bool = False

    @property
    def completed(self) -> bool:
        """True if the crawl completed normally (not paused)."""
        return not self.paused

    def __len__(self) -> int:
        """Number of scraped items."""
        return len(self.items)

    def __iter__(self) -> Iterator[dict[str, Any]]:
        """Iterate over the scraped items."""
        return iter(self.items)
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/result.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/spiders/scheduler.py | import asyncio
from itertools import count
from scrapling.core.utils import log
from scrapling.spiders.request import Request
from scrapling.core._types import List, Set, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from scrapling.spiders.checkpoint import CheckpointData
class Scheduler:
    """Priority queue with URL deduplication (backed by asyncio.PriorityQueue).

    Higher-priority requests are dequeued first. Duplicate URLs (by
    fingerprint) are dropped unless the request sets ``dont_filter=True``.
    """

    def __init__(self, include_kwargs: bool = False, include_headers: bool = False, keep_fragments: bool = False):
        self._queue: asyncio.PriorityQueue[tuple[int, int, Request]] = asyncio.PriorityQueue()
        self._seen: set[bytes] = set()
        self._counter = count()
        # Mirror of queued entries so snapshot() never has to drain the queue.
        self._pending: dict[int, tuple[int, int, Request]] = {}
        self._include_kwargs = include_kwargs
        self._include_headers = include_headers
        self._keep_fragments = keep_fragments

    async def enqueue(self, request: Request) -> bool:
        """Add a request; returns False when it was dropped as a duplicate."""
        fp = request.update_fingerprint(self._include_kwargs, self._include_headers, self._keep_fragments)
        if fp in self._seen and not request.dont_filter:
            log.debug("Dropped duplicate request: %s", request)
            return False
        self._seen.add(fp)
        # Priority is negated so higher priority sorts (and pops) first;
        # the monotonically increasing counter breaks ties FIFO.
        seq = next(self._counter)
        entry = (-request.priority, seq, request)
        self._pending[seq] = entry
        await self._queue.put(entry)
        return True

    async def dequeue(self) -> Request:
        """Pop and return the next request in priority order."""
        _, seq, request = await self._queue.get()
        self._pending.pop(seq, None)
        return request

    def __len__(self) -> int:
        """Number of queued requests."""
        return self._queue.qsize()

    @property
    def is_empty(self) -> bool:
        """True when no requests are queued."""
        return self._queue.empty()

    def snapshot(self) -> Tuple[List[Request], Set[bytes]]:
        """Snapshot of queued requests (in queue order) and the seen set."""
        # Counters are unique, so tuple ordering never compares Request objects.
        ordered = sorted(self._pending.values())
        return [entry[2] for entry in ordered], set(self._seen)

    def restore(self, data: "CheckpointData") -> None:
        """Restore scheduler state from checkpoint data.

        :param data: CheckpointData containing requests and seen set
        """
        self._seen = set(data.seen)
        # Checkpointed requests are already sorted by priority; re-enqueue in order.
        for request in data.requests:
            seq = next(self._counter)
            entry = (-request.priority, seq, request)
            self._pending[seq] = entry
            self._queue.put_nowait(entry)
        log.info(f"Scheduler restored: {len(data.requests)} requests, {len(data.seen)} seen")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/scheduler.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/spiders/session.py | from asyncio import Lock
from scrapling.spiders.request import Request
from scrapling.engines.static import _ASyncSessionLogic
from scrapling.engines.toolbelt.convertor import Response
from scrapling.core._types import Set, cast, SUPPORTED_HTTP_METHODS
from scrapling.fetchers import AsyncDynamicSession, AsyncStealthySession, FetcherSession
Session = FetcherSession | AsyncDynamicSession | AsyncStealthySession
class SessionManager:
    """Manages pre-configured session instances.

    Sessions are registered under string IDs. The first session registered
    (or the one added with ``default=True``) becomes the default used when a
    request carries no session id. Sessions marked ``lazy`` are started only
    when a request first uses them.
    """

    def __init__(self) -> None:
        self._sessions: dict[str, Session] = {}
        self._default_session_id: str | None = None
        self._started: bool = False
        self._lazy_sessions: Set[str] = set()
        self._lazy_lock = Lock()  # Serializes lazy session startup in fetch()

    def add(self, session_id: str, session: Session, *, default: bool = False, lazy: bool = False) -> "SessionManager":
        """Register a session instance.

        :param session_id: Name to reference this session in requests
        :param session: Your pre-configured session instance
        :param default: If True, this becomes the default session
        :param lazy: If True, the session will be started only when a request uses its ID.
        :return: This manager, allowing chained ``add()`` calls.
        :raises ValueError: If the session id is already registered.
        """
        if session_id in self._sessions:
            raise ValueError(f"Session '{session_id}' already registered")
        self._sessions[session_id] = session
        # The first session added becomes the default unless one was set explicitly.
        if default or self._default_session_id is None:
            self._default_session_id = session_id
        if lazy:
            self._lazy_sessions.add(session_id)
        return self

    def remove(self, session_id: str) -> None:
        """Removes a session.

        :param session_id: ID of session to remove
        """
        _ = self.pop(session_id)

    def pop(self, session_id: str) -> Session:
        """Remove and returns a session.

        :param session_id: ID of session to remove
        :raises KeyError: If no session is registered under that id.
        """
        if session_id not in self._sessions:
            raise KeyError(f"Session '{session_id}' not found")
        session = self._sessions.pop(session_id)
        if session_id in self._lazy_sessions:
            self._lazy_sessions.remove(session_id)
        # If the default was removed, promote an arbitrary remaining session (or None).
        if session and self._default_session_id == session_id:
            self._default_session_id = next(iter(self._sessions), None)
        return session

    @property
    def default_session_id(self) -> str:
        # The id used for requests that don't specify a session.
        if self._default_session_id is None:
            raise RuntimeError("No sessions registered")
        return self._default_session_id

    @property
    def session_ids(self) -> list[str]:
        # All registered session ids, in registration order.
        return list(self._sessions.keys())

    def get(self, session_id: str) -> Session:
        """Look up a session by id; raises KeyError with the available ids listed."""
        if session_id not in self._sessions:
            available = ", ".join(self._sessions.keys())
            raise KeyError(f"Session '{session_id}' not found. Available: {available}")
        return self._sessions[session_id]

    async def start(self) -> None:
        """Start all sessions that aren't already alive.

        Lazy sessions are skipped here; they are started on first use in fetch().
        """
        if self._started:
            return
        for sid, session in self._sessions.items():
            if sid not in self._lazy_sessions and not session._is_alive:
                await session.__aenter__()
        self._started = True

    async def close(self) -> None:
        """Close all registered sessions."""
        for session in self._sessions.values():
            _ = await session.__aexit__(None, None, None)
        self._started = False

    async def fetch(self, request: Request) -> Response:
        """Fetch a request using its designated (or the default) session.

        Starts lazy sessions on first use, guarded by a lock so concurrent
        requests don't start the same session twice.
        """
        sid = request.sid if request.sid else self.default_session_id
        session = self.get(sid)
        if session:
            if sid in self._lazy_sessions and not session._is_alive:
                async with self._lazy_lock:
                    # Double-check after acquiring the lock: another task may
                    # have started this session while we were waiting.
                    if not session._is_alive:
                        await session.__aenter__()
            if isinstance(session, FetcherSession):
                # FetcherSession wraps an HTTP client; call its request method directly.
                client = session._client
                if isinstance(client, _ASyncSessionLogic):
                    response = await client._make_request(
                        method=cast(SUPPORTED_HTTP_METHODS, request._session_kwargs.pop("method", "GET")),
                        url=request.url,
                        **request._session_kwargs,
                    )
                else:
                    # Sync session or other types - shouldn't happen in async context
                    raise TypeError(f"Session type {type(client)} not supported for async fetch")
            else:
                response = await session.fetch(url=request.url, **request._session_kwargs)
            response.request = request
            # Merge request meta into response meta (response meta takes priority)
            response.meta = {**request.meta, **response.meta}
            return response
        # NOTE(review): get() raises KeyError for unknown ids, so this path looks
        # unreachable unless a falsy session object was registered — confirm intent.
        raise RuntimeError("No session found with the request session id")

    async def __aenter__(self) -> "SessionManager":
        await self.start()
        return self

    async def __aexit__(self, *exc) -> None:
        await self.close()

    def __contains__(self, session_id: str) -> bool:
        """Check if a session ID is registered."""
        return session_id in self._sessions

    def __len__(self) -> int:
        """Number of registered sessions."""
        return len(self._sessions)
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/session.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/spiders/spider.py | import signal
import logging
from pathlib import Path
from abc import ABC, abstractmethod
import anyio
from anyio import Path as AsyncPath
from scrapling.spiders.request import Request
from scrapling.spiders.engine import CrawlerEngine
from scrapling.spiders.session import SessionManager
from scrapling.core.utils import set_logger, reset_logger
from scrapling.spiders.result import CrawlResult, CrawlStats
from scrapling.core._types import Set, Any, Dict, Optional, Union, TYPE_CHECKING, AsyncGenerator
BLOCKED_CODES = {401, 403, 407, 429, 444, 500, 502, 503, 504}
if TYPE_CHECKING:
from scrapling.engines.toolbelt.custom import Response
class LogCounterHandler(logging.Handler):
    """Logging handler that tallies emitted records per standard level."""

    _LEVELS = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)

    def __init__(self):
        super().__init__()
        self.counts = {level: 0 for level in self._LEVELS}

    def emit(self, record: logging.LogRecord) -> None:
        """Bucket the record into the closest standard level at or below it."""
        for threshold in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO):
            if record.levelno >= threshold:
                self.counts[threshold] += 1
                return
        self.counts[logging.DEBUG] += 1

    def get_counts(self) -> Dict[str, int]:
        """Return counts as a dictionary with string keys."""
        return {
            "debug": self.counts[logging.DEBUG],
            "info": self.counts[logging.INFO],
            "warning": self.counts[logging.WARNING],
            "error": self.counts[logging.ERROR],
            "critical": self.counts[logging.CRITICAL],
        }
class SessionConfigurationError(Exception):
    """Raised when a spider's session configuration cannot be completed."""
class Spider(ABC):
    """An abstract base class for creating web spiders.

    Subclasses must set ``name`` and implement ``parse()``; everything else
    has sensible defaults. Check the documentation website for more information.
    """

    name: Optional[str] = None
    start_urls: list[str] = []
    allowed_domains: Set[str] = set()
    # Concurrency settings
    concurrent_requests: int = 4
    concurrent_requests_per_domain: int = 0  # presumably 0 means "no per-domain cap" — TODO confirm in engine
    download_delay: float = 0.0
    max_blocked_retries: int = 3
    # Fingerprint adjustments
    fp_include_kwargs: bool = False
    fp_keep_fragments: bool = False
    fp_include_headers: bool = False
    # Logging settings
    logging_level: int = logging.DEBUG
    logging_format: str = "[%(asctime)s]:({spider_name}) %(levelname)s: %(message)s"
    logging_date_format: str = "%Y-%m-%d %H:%M:%S"
    log_file: Optional[str] = None

    def __init__(self, crawldir: Optional[Union[str, Path, AsyncPath]] = None, interval: float = 300.0):
        """Initialize the spider.

        :param crawldir: Directory for checkpoint files. If provided, enables pause/resume.
        :param interval: Seconds between periodic checkpoint saves (default 5 minutes).
        :raises ValueError: If the subclass did not set ``name``.
        :raises SessionConfigurationError: If ``configure_sessions()`` fails or adds no sessions.
        """
        if self.name is None:
            raise ValueError(f"{self.__class__.__name__} must have a name.")
        # Per-spider logger; handlers are cleared so repeated instantiation
        # doesn't stack duplicate handlers on the same named logger.
        self.logger = logging.getLogger(f"scrapling.spiders.{self.name}")
        self.logger.setLevel(self.logging_level)
        self.logger.handlers.clear()
        self.logger.propagate = False  # Don't propagate to parent 'scrapling' logger
        formatter = logging.Formatter(
            fmt=self.logging_format.format(spider_name=self.name), datefmt=self.logging_date_format
        )
        # Add a log counter handler to track log counts by level
        self._log_counter = LogCounterHandler()
        self.logger.addHandler(self._log_counter)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        self.logger.addHandler(console_handler)
        if self.log_file:
            Path(self.log_file).parent.mkdir(parents=True, exist_ok=True)
            file_handler = logging.FileHandler(self.log_file)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)
        self.crawldir: Optional[Path] = Path(crawldir) if crawldir else None
        self._interval = interval
        self._engine: Optional[CrawlerEngine] = None
        self._original_sigint_handler: Any = None
        self._session_manager = SessionManager()
        try:
            self.configure_sessions(self._session_manager)
        except Exception as e:
            raise SessionConfigurationError(f"Error in {self.__class__.__name__}.configure_sessions(): {e}") from e
        if len(self._session_manager) == 0:
            raise SessionConfigurationError(f"{self.__class__.__name__}.configure_sessions() did not add any sessions")
        self.logger.info("Spider initialized")

    async def start_requests(self) -> AsyncGenerator[Request, None]:
        """Generate initial requests to start the crawl.

        By default, this generates Request objects for each URL in `start_urls`
        using the session manager's default session and `parse()` as callback.
        Override this method for more control over initial requests
        (e.g., to add custom headers, use different callbacks, etc.)
        """
        if not self.start_urls:
            raise RuntimeError(
                "Spider has no starting point, either set `start_urls` or override `start_requests` function."
            )
        for url in self.start_urls:
            yield Request(url, sid=self._session_manager.default_session_id)

    @abstractmethod
    async def parse(self, response: "Response") -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
        """Default callback for processing responses.

        Implementations yield scraped item dicts and/or follow-up Requests.
        """
        raise NotImplementedError(f"{self.__class__.__name__} must implement parse() method")
        yield  # Make this a generator for type checkers

    async def on_start(self, resuming: bool = False) -> None:
        """Called before crawling starts. Override for setup logic.

        :param resuming: It's enabled if the spider is resuming from a checkpoint, left for the user to use.
        """
        if resuming:
            self.logger.debug("Resuming spider from checkpoint")
        else:
            self.logger.debug("Starting spider")

    async def on_close(self) -> None:
        """Called after crawling finishes. Override for cleanup logic."""
        self.logger.debug("Spider closed")

    async def on_error(self, request: Request, error: Exception) -> None:
        """
        Handle request errors for all spider requests.

        Override for custom error handling; the default is a no-op.
        """
        pass

    async def on_scraped_item(self, item: Dict[str, Any]) -> Dict[str, Any] | None:
        """A hook to be overridden by users to do some processing on scraped items, return `None` to drop the item silently."""
        return item

    async def is_blocked(self, response: "Response") -> bool:
        """Check if the response is blocked. Users should override this for custom detection logic."""
        # Default heuristic: status code is in the module-level BLOCKED_CODES set.
        if response.status in BLOCKED_CODES:
            return True
        return False

    async def retry_blocked_request(self, request: Request, response: "Response") -> Request:
        """Users should override this to prepare the blocked request before retrying, if needed."""
        return request

    def __repr__(self) -> str:
        """String representation of the spider."""
        return f"<{self.__class__.__name__} '{self.name}'>"

    def configure_sessions(self, manager: SessionManager) -> None:
        """Configure sessions for this spider.

        Override this method to add custom sessions.
        The default implementation creates a FetcherSession session.
        The first session added becomes the default for `start_requests()` unless specified otherwise.

        :param manager: SessionManager to configure
        """
        from scrapling.fetchers import FetcherSession

        manager.add("default", FetcherSession())

    def pause(self):
        """Request graceful shutdown of the crawling process.

        :raises RuntimeError: If no crawl is currently running.
        """
        if self._engine:
            self._engine.request_pause()
        else:
            raise RuntimeError("No active crawl to stop")

    def _setup_signal_handler(self) -> None:
        """Set up SIGINT handler for graceful pause."""

        def handler(_signum: int, _frame: Any) -> None:
            if self._engine:
                self._engine.request_pause()
            else:
                # No engine yet, just raise KeyboardInterrupt
                raise KeyboardInterrupt

        try:
            self._original_sigint_handler = signal.signal(signal.SIGINT, handler)
        except ValueError:
            # signal.signal() only works in the main thread; fall back to no handler.
            self._original_sigint_handler = None

    def _restore_signal_handler(self) -> None:
        """Restore original SIGINT handler."""
        if self._original_sigint_handler is not None:
            try:
                signal.signal(signal.SIGINT, self._original_sigint_handler)
            except ValueError:
                pass

    async def __run(self) -> CrawlResult:
        # Internal async entry point used by start(); binds the spider logger
        # into the library's context for the duration of the crawl.
        token = set_logger(self.logger)
        try:
            self._engine = CrawlerEngine(self, self._session_manager, self.crawldir, self._interval)
            stats = await self._engine.crawl()
            paused = self._engine.paused
            return CrawlResult(stats=stats, items=self._engine.items, paused=paused)
        finally:
            self._engine = None
            reset_logger(token)
            # Close any file handlers to release file resources.
            if self.log_file:
                for handler in self.logger.handlers:
                    if isinstance(handler, logging.FileHandler):
                        handler.close()

    def start(self, use_uvloop: bool = False, **backend_options: Any) -> CrawlResult:
        """Run the spider and return results.

        This is the main entry point for running a spider.
        Handles async execution internally via anyio.
        Pressing Ctrl+C will initiate graceful shutdown (waits for active tasks to complete).
        Pressing Ctrl+C a second time will force immediate stop.
        If crawldir is set, a checkpoint will also be saved on graceful shutdown,
        allowing you to resume the crawl later by running the spider again.

        :param use_uvloop: Whether to use the faster uvloop/winloop event loop implementation, if available.
        :param backend_options: Asyncio backend options to be used with `anyio.run`
        """
        backend_options = backend_options or {}
        if use_uvloop:
            backend_options.update({"use_uvloop": True})
        # Set up SIGINT handler for graceful shutdown
        self._setup_signal_handler()
        try:
            return anyio.run(self.__run, backend="asyncio", backend_options=backend_options)
        finally:
            self._restore_signal_handler()

    async def stream(self) -> AsyncGenerator[Dict[str, Any], None]:
        """Stream items as they're scraped. Ideal for long-running spiders or building applications on top of the spiders.

        Must be called from an async context. Yields items one by one as they are scraped.
        Access `spider.stats` during iteration for real-time statistics.
        Note: SIGINT handling for pause/resume is not available in stream mode.
        """
        token = set_logger(self.logger)
        try:
            self._engine = CrawlerEngine(self, self._session_manager, self.crawldir, self._interval)
            async for item in self._engine:
                yield item
        finally:
            self._engine = None
            reset_logger(token)
            # Same file-handler cleanup as __run(); stream() bypasses start().
            if self.log_file:
                for handler in self.logger.handlers:
                    if isinstance(handler, logging.FileHandler):
                        handler.close()

    @property
    def stats(self) -> CrawlStats:
        """Access current crawl stats (works during streaming).

        :raises RuntimeError: If no crawl is currently running.
        """
        if self._engine:
            return self._engine.stats
        raise RuntimeError("No active crawl. Use this property inside `async for item in spider.stream():`")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/spiders/spider.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:tests/fetchers/test_proxy_rotation.py | import pytest
import random
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
from scrapling.engines.toolbelt import ProxyRotator, is_proxy_error, cyclic_rotation
class TestCyclicRotationStrategy:
    """Behavior of the default cyclic_rotation strategy function."""

    def test_cyclic_rotation_cycles_through_proxies(self):
        """Proxies are returned in declaration order, wrapping after the last."""
        pool = ["http://p1:8080", "http://p2:8080", "http://p3:8080"]
        for idx, expected in enumerate(pool):
            chosen, following = cyclic_rotation(pool, idx)
            assert chosen == expected
            assert following == (idx + 1) % len(pool)

    def test_cyclic_rotation_wraps_index(self):
        """An index past the end of the list wraps around via modulo."""
        pool = ["http://p1:8080", "http://p2:8080"]
        chosen, following = cyclic_rotation(pool, 5)
        assert chosen == "http://p2:8080"  # 5 % 2 == 1
        assert following == 0
class TestProxyRotatorCreation:
    """ProxyRotator construction and input validation."""

    def test_create_with_string_proxies(self):
        """String proxy URLs are accepted as-is."""
        pool = ["http://p1:8080", "http://p2:8080"]
        rotator = ProxyRotator(pool)
        assert len(rotator) == 2
        assert rotator.proxies == pool

    def test_create_with_dict_proxies(self):
        """Dict proxies (with optional credentials) are accepted."""
        pool = [
            {"server": "http://p1:8080", "username": "user1", "password": "pass1"},
            {"server": "http://p2:8080"},
        ]
        rotator = ProxyRotator(pool)
        assert len(rotator) == 2
        assert rotator.proxies == pool

    def test_create_with_mixed_proxies(self):
        """Strings and dicts can be mixed in one pool."""
        rotator = ProxyRotator(["http://p1:8080", {"server": "http://p2:8080", "username": "user"}])
        assert len(rotator) == 2

    def test_empty_proxies_raises_error(self):
        """An empty proxy list is rejected with ValueError."""
        with pytest.raises(ValueError, match="At least one proxy must be provided"):
            ProxyRotator([])

    def test_dict_without_server_raises_error(self):
        """A dict proxy must carry a 'server' key."""
        with pytest.raises(ValueError, match="Proxy dict must have a 'server' key"):
            ProxyRotator([{"username": "user", "password": "pass"}])

    def test_invalid_proxy_type_raises_error(self):
        """Non-string, non-dict proxy entries raise TypeError."""
        for bad in (123, None):
            with pytest.raises(TypeError, match="Invalid proxy type"):
                ProxyRotator([bad])

    def test_non_callable_strategy_raises_error(self):
        """The strategy argument must be callable."""
        for bad in ("cyclic_rotation", 123):
            with pytest.raises(TypeError, match="strategy must be callable"):
                ProxyRotator(["http://p1:8080"], strategy=bad)
class TestProxyRotatorRotation:
    """Default rotation behavior of get_proxy()."""

    def test_get_proxy_cyclic_rotation(self):
        """Proxies come back in order and wrap around after the last one."""
        pool = ["http://p1:8080", "http://p2:8080", "http://p3:8080"]
        rotator = ProxyRotator(pool)
        for _cycle in range(2):  # two full passes over the pool
            for expected in pool:
                assert rotator.get_proxy() == expected

    def test_get_proxy_single_proxy(self):
        """A one-proxy pool always yields that same proxy."""
        rotator = ProxyRotator(["http://only:8080"])
        assert all(rotator.get_proxy() == "http://only:8080" for _ in range(5))

    def test_get_proxy_with_dict_proxies(self):
        """Dict proxies rotate exactly like string proxies."""
        pool = [{"server": "http://p1:8080"}, {"server": "http://p2:8080"}]
        rotator = ProxyRotator(pool)
        assert [rotator.get_proxy() for _ in range(3)] == [pool[0], pool[1], pool[0]]
class TestCustomStrategies:
    """ProxyRotator with user-supplied rotation strategies."""

    def test_random_strategy(self):
        """A random-choice strategy always returns a member of the pool."""

        def pick_random(pool, idx):
            return random.choice(pool), idx

        pool = ["http://p1:8080", "http://p2:8080", "http://p3:8080"]
        rotator = ProxyRotator(pool, strategy=pick_random)
        assert all(rotator.get_proxy() in pool for _ in range(10))

    def test_sticky_strategy(self):
        """A sticky strategy can pin every request to the first proxy."""

        def always_first(pool, idx):
            return pool[0], idx

        rotator = ProxyRotator(["http://p1:8080", "http://p2:8080"], strategy=always_first)
        assert all(rotator.get_proxy() == "http://p1:8080" for _ in range(5))

    def test_weighted_strategy(self):
        """A stateful strategy can weight proxies (2:1 here)."""
        calls = {"n": 0}

        def two_to_one(pool, idx):
            # Returns the primary proxy twice, then the backup once.
            calls["n"] += 1
            return (pool[1] if calls["n"] % 3 == 0 else pool[0]), idx

        rotator = ProxyRotator(["http://primary:8080", "http://backup:8080"], strategy=two_to_one)
        picks = [rotator.get_proxy() for _ in range(3)]
        assert picks == ["http://primary:8080", "http://primary:8080", "http://backup:8080"]

    def test_lambda_strategy(self):
        """A bare lambda works as a strategy too."""
        rotator = ProxyRotator(
            ["http://p1:8080", "http://p2:8080"],
            strategy=lambda pool, idx: (pool[-1], idx),  # always the last proxy
        )
        assert rotator.get_proxy() == "http://p2:8080"
        assert rotator.get_proxy() == "http://p2:8080"
class TestProxyRotatorProperties:
    """ProxyRotator accessors and dunder helpers."""

    def test_proxies_property_returns_copy(self):
        """Mutating the returned list must not affect the rotator's own pool."""
        source = ["http://p1:8080", "http://p2:8080"]
        rotator = ProxyRotator(source)
        exposed = rotator.proxies
        exposed.append("http://p3:8080")
        assert len(rotator) == 2
        assert len(rotator.proxies) == 2

    def test_len_returns_proxy_count(self):
        """__len__ reflects the number of registered proxies."""
        for n in (1, 2, 5):
            assert len(ProxyRotator([f"proxy{i}" for i in range(n)])) == n

    def test_repr(self):
        """__repr__ includes the proxy count."""
        rotator = ProxyRotator(["http://p1:8080", "http://p2:8080", "http://p3:8080"])
        assert repr(rotator) == "ProxyRotator(proxies=3)"
class TestProxyRotatorThreadSafety:
    """Concurrent access to ProxyRotator."""

    def test_concurrent_get_proxy(self):
        """Many raw threads calling get_proxy() produce only valid proxies."""
        pool = [f"http://p{i}:8080" for i in range(10)]
        rotator = ProxyRotator(pool)
        collected = []

        def worker(iterations):
            for _ in range(iterations):
                collected.append(rotator.get_proxy())

        workers = [Thread(target=worker, args=(100,)) for _ in range(10)]
        for t in workers:
            t.start()
        for t in workers:
            t.join()
        assert len(collected) == 1000
        assert all(p in pool for p in collected)

    def test_thread_pool_concurrent_access(self):
        """ThreadPoolExecutor fan-out returns only valid proxies."""
        pool = ["http://p1:8080", "http://p2:8080", "http://p3:8080"]
        rotator = ProxyRotator(pool)
        with ThreadPoolExecutor(max_workers=5) as executor:
            pending = [executor.submit(rotator.get_proxy) for _ in range(100)]
            outcomes = [future.result() for future in pending]
        assert len(outcomes) == 100
        assert all(p in pool for p in outcomes)
class TestIsProxyError:
    """is_proxy_error() classification of exception messages."""

    @pytest.mark.parametrize("error_msg", [
        "net::err_proxy_connection_failed",
        "NET::ERR_PROXY_AUTH_FAILED",
        "net::err_tunnel_connection_failed",
        "Connection refused by proxy",
        "Connection reset by peer",
        "Connection timed out while connecting to proxy",
        "Failed to connect to proxy server",
        "Could not resolve proxy host",
    ])
    def test_proxy_errors_detected(self, error_msg):
        """Known proxy failure messages are classified as proxy errors."""
        assert is_proxy_error(Exception(error_msg)) is True

    @pytest.mark.parametrize("error_msg", [
        "Page not found",
        "404 Not Found",
        "Internal server error",
        "DNS resolution failed",
        "SSL certificate error",
        "Timeout waiting for response",
        "Invalid JSON response",
    ])
    def test_non_proxy_errors_not_detected(self, error_msg):
        """Unrelated error messages are not classified as proxy errors."""
        assert is_proxy_error(Exception(error_msg)) is False

    def test_case_insensitive_detection(self):
        """Detection ignores the casing of the message."""
        for msg in ("NET::ERR_PROXY", "Net::Err_Proxy", "CONNECTION REFUSED"):
            assert is_proxy_error(Exception(msg)) is True

    def test_empty_error_message(self):
        """An empty message is not a proxy error."""
        assert is_proxy_error(Exception("")) is False

    def test_custom_exception_types(self):
        """Detection works on any Exception subclass, not just Exception itself."""

        class CustomError(Exception):
            pass

        assert is_proxy_error(CustomError("net::err_proxy_failed")) is True
        assert is_proxy_error(CustomError("normal error")) is False
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_proxy_rotation.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_checkpoint.py | """Tests for the CheckpointManager and CheckpointData classes."""
import pickle
import tempfile
from pathlib import Path
import pytest
import anyio
from scrapling.spiders.request import Request
from scrapling.spiders.checkpoint import CheckpointData, CheckpointManager
class TestCheckpointData:
"""Test CheckpointData dataclass."""
def test_default_values(self):
"""Test CheckpointData with default values."""
data = CheckpointData()
assert data.requests == []
assert data.seen == set()
def test_with_requests_and_seen(self):
"""Test CheckpointData with requests and seen URLs."""
requests = [
Request("https://example.com/1", priority=10),
Request("https://example.com/2", priority=5),
]
seen = {"url1", "url2", "url3"}
data = CheckpointData(requests=requests, seen=seen)
assert len(data.requests) == 2
assert data.requests[0].url == "https://example.com/1"
assert data.seen == {"url1", "url2", "url3"}
def test_pickle_roundtrip(self):
"""Test that CheckpointData can be pickled and unpickled."""
requests = [Request("https://example.com", priority=5)]
seen = {"fingerprint1", "fingerprint2"}
data = CheckpointData(requests=requests, seen=seen)
pickled = pickle.dumps(data)
restored = pickle.loads(pickled)
assert len(restored.requests) == 1
assert restored.requests[0].url == "https://example.com"
assert restored.seen == {"fingerprint1", "fingerprint2"}
class TestCheckpointManagerInit:
"""Test CheckpointManager initialization."""
def test_init_with_string_path(self):
"""Test initialization with string path."""
manager = CheckpointManager("/tmp/test_crawl")
assert str(manager.crawldir) == "/tmp/test_crawl"
assert manager.interval == 300.0
def test_init_with_pathlib_path(self):
"""Test initialization with pathlib.Path."""
path = Path("/tmp/test_crawl")
manager = CheckpointManager(path)
assert str(manager.crawldir) == "/tmp/test_crawl"
def test_init_with_custom_interval(self):
"""Test initialization with custom interval."""
manager = CheckpointManager("/tmp/test", interval=60.0)
assert manager.interval == 60.0
def test_init_with_zero_interval(self):
"""Test initialization with zero interval (disable periodic checkpoints)."""
manager = CheckpointManager("/tmp/test", interval=0)
assert manager.interval == 0
def test_init_with_negative_interval_raises(self):
"""Test that negative interval raises ValueError."""
with pytest.raises(ValueError, match="greater than 0"):
CheckpointManager("/tmp/test", interval=-1)
def test_init_with_invalid_interval_type_raises(self):
"""Test that invalid interval type raises TypeError."""
with pytest.raises(TypeError, match="integer or float"):
CheckpointManager("/tmp/test", interval="invalid") # type: ignore
def test_checkpoint_file_path(self):
"""Test that checkpoint file path is correctly constructed."""
manager = CheckpointManager("/tmp/test_crawl")
expected_path = "/tmp/test_crawl/checkpoint.pkl"
assert str(manager._checkpoint_path) == expected_path
class TestCheckpointManagerOperations:
"""Test CheckpointManager save/load/cleanup operations."""
@pytest.fixture
def temp_dir(self):
"""Create a temporary directory for testing."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.mark.asyncio
async def test_has_checkpoint_false_when_no_file(self, temp_dir: Path):
"""Test has_checkpoint returns False when no checkpoint exists."""
manager = CheckpointManager(temp_dir / "crawl")
result = await manager.has_checkpoint()
assert result is False
@pytest.mark.asyncio
async def test_save_creates_checkpoint_file(self, temp_dir: Path):
"""Test that save creates the checkpoint file."""
crawl_dir = temp_dir / "crawl"
manager = CheckpointManager(crawl_dir)
data = CheckpointData(
requests=[Request("https://example.com")],
seen={"fp1", "fp2"},
)
await manager.save(data)
checkpoint_path = crawl_dir / "checkpoint.pkl"
assert checkpoint_path.exists()
@pytest.mark.asyncio
async def test_save_creates_directory_if_not_exists(self, temp_dir: Path):
"""Test that save creates the directory if it doesn't exist."""
crawl_dir = temp_dir / "nested" / "crawl" / "dir"
manager = CheckpointManager(crawl_dir)
data = CheckpointData()
await manager.save(data)
assert crawl_dir.exists()
@pytest.mark.asyncio
async def test_has_checkpoint_true_after_save(self, temp_dir: Path):
"""Test has_checkpoint returns True after saving."""
manager = CheckpointManager(temp_dir / "crawl")
data = CheckpointData()
await manager.save(data)
result = await manager.has_checkpoint()
assert result is True
@pytest.mark.asyncio
async def test_load_returns_none_when_no_checkpoint(self, temp_dir: Path):
"""Test load returns None when no checkpoint exists."""
manager = CheckpointManager(temp_dir / "crawl")
result = await manager.load()
assert result is None
@pytest.mark.asyncio
async def test_save_and_load_roundtrip(self, temp_dir: Path):
"""Test saving and loading checkpoint data."""
manager = CheckpointManager(temp_dir / "crawl")
original_data = CheckpointData(
requests=[
Request("https://example.com/1", priority=10),
Request("https://example.com/2", priority=5),
],
seen={"fp1", "fp2", "fp3"},
)
await manager.save(original_data)
loaded_data = await manager.load()
assert loaded_data is not None
assert len(loaded_data.requests) == 2
assert loaded_data.requests[0].url == "https://example.com/1"
assert loaded_data.requests[0].priority == 10
assert loaded_data.seen == {"fp1", "fp2", "fp3"}
@pytest.mark.asyncio
async def test_save_is_atomic(self, temp_dir: Path):
"""Test that save uses atomic write (temp file + rename)."""
crawl_dir = temp_dir / "crawl"
manager = CheckpointManager(crawl_dir)
data = CheckpointData(requests=[Request("https://example.com")])
await manager.save(data)
# Temp file should not exist after successful save
temp_path = crawl_dir / "checkpoint.tmp"
assert not temp_path.exists()
# Checkpoint file should exist
checkpoint_path = crawl_dir / "checkpoint.pkl"
assert checkpoint_path.exists()
@pytest.mark.asyncio
async def test_cleanup_removes_checkpoint_file(self, temp_dir: Path):
"""Test that cleanup removes the checkpoint file."""
crawl_dir = temp_dir / "crawl"
manager = CheckpointManager(crawl_dir)
# Save a checkpoint first
data = CheckpointData()
await manager.save(data)
checkpoint_path = crawl_dir / "checkpoint.pkl"
assert checkpoint_path.exists()
# Cleanup should remove it
await manager.cleanup()
assert not checkpoint_path.exists()
@pytest.mark.asyncio
async def test_cleanup_no_error_when_no_file(self, temp_dir: Path):
"""Test that cleanup doesn't raise error when no file exists."""
manager = CheckpointManager(temp_dir / "crawl")
# Should not raise
await manager.cleanup()
@pytest.mark.asyncio
async def test_load_returns_none_on_corrupt_file(self, temp_dir: Path):
"""Test load returns None when checkpoint file is corrupt."""
crawl_dir = temp_dir / "crawl"
crawl_dir.mkdir(parents=True)
checkpoint_path = crawl_dir / "checkpoint.pkl"
checkpoint_path.write_bytes(b"not valid pickle data")
manager = CheckpointManager(crawl_dir)
result = await manager.load()
assert result is None
@pytest.mark.asyncio
async def test_multiple_saves_overwrite(self, temp_dir: Path):
"""Test that multiple saves overwrite the checkpoint."""
manager = CheckpointManager(temp_dir / "crawl")
# First save
data1 = CheckpointData(
requests=[Request("https://example.com/1")],
seen={"fp1"},
)
await manager.save(data1)
# Second save
data2 = CheckpointData(
requests=[Request("https://example.com/2"), Request("https://example.com/3")],
seen={"fp2", "fp3"},
)
await manager.save(data2)
# Load should return the second save
loaded = await manager.load()
assert loaded is not None
assert len(loaded.requests) == 2
assert loaded.requests[0].url == "https://example.com/2"
assert loaded.seen == {"fp2", "fp3"}
class TestCheckpointManagerEdgeCases:
"""Test edge cases for CheckpointManager."""
@pytest.fixture
def temp_dir(self):
"""Create a temporary directory for testing."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.mark.asyncio
async def test_save_empty_checkpoint(self, temp_dir: Path):
"""Test saving empty checkpoint data."""
manager = CheckpointManager(temp_dir / "crawl")
data = CheckpointData(requests=[], seen=set())
await manager.save(data)
loaded = await manager.load()
assert loaded is not None
assert loaded.requests == []
assert loaded.seen == set()
@pytest.mark.asyncio
async def test_save_large_checkpoint(self, temp_dir: Path):
"""Test saving checkpoint with many requests."""
manager = CheckpointManager(temp_dir / "crawl")
# Create 1000 requests
requests = [
Request(f"https://example.com/{i}", priority=i % 10)
for i in range(1000)
]
seen = {f"fp_{i}" for i in range(2000)}
data = CheckpointData(requests=requests, seen=seen)
await manager.save(data)
loaded = await manager.load()
assert loaded is not None
assert len(loaded.requests) == 1000
assert len(loaded.seen) == 2000
@pytest.mark.asyncio
async def test_requests_preserve_metadata(self, temp_dir: Path):
"""Test that request metadata is preserved through checkpoint."""
manager = CheckpointManager(temp_dir / "crawl")
original_request = Request(
url="https://example.com",
sid="my_session",
priority=42,
dont_filter=True,
meta={"item_id": 123, "page": 5},
proxy="http://proxy:8080",
)
data = CheckpointData(requests=[original_request], seen=set())
await manager.save(data)
loaded = await manager.load()
assert loaded is not None
restored = loaded.requests[0]
assert restored.url == "https://example.com"
assert restored.sid == "my_session"
assert restored.priority == 42
assert restored.dont_filter is True
assert restored.meta == {"item_id": 123, "page": 5}
assert restored._session_kwargs == {"proxy": "http://proxy:8080"}
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_checkpoint.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_engine.py | """Tests for the CrawlerEngine class."""
import tempfile
from pathlib import Path
import anyio
import pytest
from scrapling.spiders.engine import CrawlerEngine, _dump
from scrapling.spiders.request import Request
from scrapling.spiders.session import SessionManager
from scrapling.spiders.result import CrawlStats, ItemList
from scrapling.spiders.checkpoint import CheckpointData
from scrapling.core._types import Any, Dict, Set, AsyncGenerator
# ---------------------------------------------------------------------------
# Mock helpers
# ---------------------------------------------------------------------------
class MockResponse:
"""Minimal Response stand-in."""
def __init__(self, status: int = 200, body: bytes = b"ok", url: str = "https://example.com"):
self.status = status
self.body = body
self.url = url
self.request: Any = None
self.meta: Dict[str, Any] = {}
def __str__(self) -> str:
return self.url
class MockSession:
"""Mock session that returns a canned response."""
def __init__(self, name: str = "mock", response: MockResponse | None = None):
self.name = name
self._is_alive = False
self._response = response or MockResponse()
self.fetch_calls: list[dict] = []
async def __aenter__(self):
self._is_alive = True
return self
async def __aexit__(self, *args):
self._is_alive = False
async def fetch(self, url: str, **kwargs):
self.fetch_calls.append({"url": url, **kwargs})
resp = MockResponse(status=self._response.status, body=self._response.body, url=url)
return resp
class ErrorSession(MockSession):
"""Session that raises on fetch."""
def __init__(self, error: Exception | None = None):
super().__init__("error")
self._error = error or RuntimeError("fetch failed")
async def fetch(self, url: str, **kwargs):
raise self._error
class MockSpider:
"""Lightweight spider stub for engine tests."""
def __init__(
self,
*,
concurrent_requests: int = 4,
concurrent_requests_per_domain: int = 0,
download_delay: float = 0.0,
max_blocked_retries: int = 3,
allowed_domains: Set[str] | None = None,
fp_include_kwargs: bool = False,
fp_include_headers: bool = False,
fp_keep_fragments: bool = False,
is_blocked_fn=None,
on_scraped_item_fn=None,
retry_blocked_request_fn=None,
):
self.concurrent_requests = concurrent_requests
self.concurrent_requests_per_domain = concurrent_requests_per_domain
self.download_delay = download_delay
self.max_blocked_retries = max_blocked_retries
self.allowed_domains = allowed_domains or set()
self.fp_include_kwargs = fp_include_kwargs
self.fp_include_headers = fp_include_headers
self.fp_keep_fragments = fp_keep_fragments
self.name = "test_spider"
# Tracking lists
self.on_start_calls: list[dict] = []
self.on_close_calls: int = 0
self.on_error_calls: list[tuple[Request, Exception]] = []
self.scraped_items: list[dict] = []
self.blocked_responses: list = []
self.retry_requests: list = []
# Pluggable behaviour
self._is_blocked_fn = is_blocked_fn
self._on_scraped_item_fn = on_scraped_item_fn
self._retry_blocked_request_fn = retry_blocked_request_fn
# Log counter stub
self._log_counter = _LogCounterStub()
async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
yield {"url": str(response)}
async def on_start(self, resuming: bool = False) -> None:
self.on_start_calls.append({"resuming": resuming})
async def on_close(self) -> None:
self.on_close_calls += 1
async def on_error(self, request: Request, error: Exception) -> None:
self.on_error_calls.append((request, error))
async def on_scraped_item(self, item: Dict[str, Any]) -> Dict[str, Any] | None:
if self._on_scraped_item_fn:
return self._on_scraped_item_fn(item)
self.scraped_items.append(item)
return item
async def is_blocked(self, response) -> bool:
if self._is_blocked_fn:
return self._is_blocked_fn(response)
return False
async def retry_blocked_request(self, request: Request, response) -> Request:
self.retry_requests.append(request)
if self._retry_blocked_request_fn:
return self._retry_blocked_request_fn(request, response)
return request
async def start_requests(self) -> AsyncGenerator[Request, None]:
yield Request("https://example.com", sid="default")
class _LogCounterStub:
"""Stub for LogCounterHandler."""
def get_counts(self) -> Dict[str, int]:
return {"debug": 0, "info": 0, "warning": 0, "error": 0, "critical": 0}
def _make_engine(
spider: MockSpider | None = None,
session: MockSession | None = None,
crawldir: str | None = None,
interval: float = 300.0,
) -> CrawlerEngine:
"""Create a CrawlerEngine wired to mock objects."""
spider = spider or MockSpider()
sm = SessionManager()
sm.add("default", session or MockSession())
return CrawlerEngine(spider, sm, crawldir=crawldir, interval=interval)
# ---------------------------------------------------------------------------
# Tests: _dump helper
# ---------------------------------------------------------------------------
class TestDumpHelper:
def test_dump_returns_json_string(self):
result = _dump({"key": "value"})
assert '"key": "value"' in result
def test_dump_handles_nested(self):
result = _dump({"a": {"b": 1}})
assert '"a"' in result
assert '"b"' in result
# ---------------------------------------------------------------------------
# Tests: __init__
# ---------------------------------------------------------------------------
class TestCrawlerEngineInit:
def test_default_initialisation(self):
engine = _make_engine()
assert engine._running is False
assert engine._active_tasks == 0
assert engine._pause_requested is False
assert engine._force_stop is False
assert engine.paused is False
assert isinstance(engine.stats, CrawlStats)
assert isinstance(engine.items, ItemList)
def test_checkpoint_system_disabled_by_default(self):
engine = _make_engine()
assert engine._checkpoint_system_enabled is False
def test_checkpoint_system_enabled_with_crawldir(self):
with tempfile.TemporaryDirectory() as tmpdir:
engine = _make_engine(crawldir=tmpdir)
assert engine._checkpoint_system_enabled is True
def test_global_limiter_uses_concurrent_requests(self):
spider = MockSpider(concurrent_requests=8)
engine = _make_engine(spider=spider)
assert engine._global_limiter.total_tokens == 8
def test_allowed_domains_from_spider(self):
spider = MockSpider(allowed_domains={"example.com", "test.org"})
engine = _make_engine(spider=spider)
assert engine._allowed_domains == {"example.com", "test.org"}
# ---------------------------------------------------------------------------
# Tests: _is_domain_allowed
# ---------------------------------------------------------------------------
class TestIsDomainAllowed:
def test_all_allowed_when_empty(self):
engine = _make_engine()
request = Request("https://anything.com/page")
assert engine._is_domain_allowed(request) is True
def test_exact_domain_match(self):
spider = MockSpider(allowed_domains={"example.com"})
engine = _make_engine(spider=spider)
assert engine._is_domain_allowed(Request("https://example.com/page")) is True
assert engine._is_domain_allowed(Request("https://other.com/page")) is False
def test_subdomain_match(self):
spider = MockSpider(allowed_domains={"example.com"})
engine = _make_engine(spider=spider)
assert engine._is_domain_allowed(Request("https://sub.example.com/page")) is True
assert engine._is_domain_allowed(Request("https://deep.sub.example.com/x")) is True
def test_partial_name_not_matched(self):
spider = MockSpider(allowed_domains={"example.com"})
engine = _make_engine(spider=spider)
# "notexample.com" should NOT match "example.com"
assert engine._is_domain_allowed(Request("https://notexample.com/x")) is False
def test_multiple_allowed_domains(self):
spider = MockSpider(allowed_domains={"a.com", "b.org"})
engine = _make_engine(spider=spider)
assert engine._is_domain_allowed(Request("https://a.com/")) is True
assert engine._is_domain_allowed(Request("https://b.org/")) is True
assert engine._is_domain_allowed(Request("https://c.net/")) is False
# ---------------------------------------------------------------------------
# Tests: _rate_limiter
# ---------------------------------------------------------------------------
class TestRateLimiter:
def test_returns_global_limiter_when_per_domain_disabled(self):
engine = _make_engine() # concurrent_requests_per_domain=0
limiter = engine._rate_limiter("example.com")
assert limiter is engine._global_limiter
def test_returns_per_domain_limiter_when_enabled(self):
spider = MockSpider(concurrent_requests_per_domain=2)
engine = _make_engine(spider=spider)
limiter = engine._rate_limiter("example.com")
assert limiter is not engine._global_limiter
assert limiter.total_tokens == 2
def test_same_domain_returns_same_limiter(self):
spider = MockSpider(concurrent_requests_per_domain=2)
engine = _make_engine(spider=spider)
l1 = engine._rate_limiter("example.com")
l2 = engine._rate_limiter("example.com")
assert l1 is l2
def test_different_domains_get_different_limiters(self):
spider = MockSpider(concurrent_requests_per_domain=2)
engine = _make_engine(spider=spider)
l1 = engine._rate_limiter("a.com")
l2 = engine._rate_limiter("b.com")
assert l1 is not l2
# ---------------------------------------------------------------------------
# Tests: _normalize_request
# ---------------------------------------------------------------------------
class TestNormalizeRequest:
def test_sets_default_sid_when_empty(self):
engine = _make_engine()
request = Request("https://example.com")
assert request.sid == ""
engine._normalize_request(request)
assert request.sid == "default"
def test_preserves_existing_sid(self):
engine = _make_engine()
request = Request("https://example.com", sid="custom")
engine._normalize_request(request)
assert request.sid == "custom"
# ---------------------------------------------------------------------------
# Tests: _process_request
# ---------------------------------------------------------------------------
class TestProcessRequest:
@pytest.mark.asyncio
async def test_successful_fetch_updates_stats(self):
spider = MockSpider()
session = MockSession(response=MockResponse(status=200, body=b"hello"))
engine = _make_engine(spider=spider, session=session)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
assert engine.stats.requests_count == 1
assert engine.stats.response_bytes == 5 # len(b"hello") from MockSession
assert "status_200" in engine.stats.response_status_count
@pytest.mark.asyncio
async def test_failed_fetch_increments_failed_count(self):
spider = MockSpider()
sm = SessionManager()
sm.add("default", ErrorSession())
engine = CrawlerEngine(spider, sm)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
assert engine.stats.failed_requests_count == 1
assert len(spider.on_error_calls) == 1
@pytest.mark.asyncio
async def test_failed_fetch_does_not_increment_requests_count(self):
spider = MockSpider()
sm = SessionManager()
sm.add("default", ErrorSession())
engine = CrawlerEngine(spider, sm)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
assert engine.stats.requests_count == 0
@pytest.mark.asyncio
async def test_blocked_response_triggers_retry(self):
spider = MockSpider(is_blocked_fn=lambda r: True, max_blocked_retries=2)
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
assert engine.stats.blocked_requests_count == 1
# A retry request should be enqueued
assert not engine.scheduler.is_empty
@pytest.mark.asyncio
async def test_blocked_response_max_retries_exceeded(self):
spider = MockSpider(is_blocked_fn=lambda r: True, max_blocked_retries=2)
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default")
request._retry_count = 2 # Already at max
await engine._process_request(request)
assert engine.stats.blocked_requests_count == 1
# No retry enqueued
assert engine.scheduler.is_empty
@pytest.mark.asyncio
async def test_retry_request_has_dont_filter(self):
spider = MockSpider(is_blocked_fn=lambda r: True, max_blocked_retries=3)
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
retry = await engine.scheduler.dequeue()
assert retry.dont_filter is True
assert retry._retry_count == 1
@pytest.mark.asyncio
async def test_retry_clears_proxy_kwargs(self):
spider = MockSpider(is_blocked_fn=lambda r: True, max_blocked_retries=3)
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default", proxy="http://proxy:8080")
await engine._process_request(request)
retry = await engine.scheduler.dequeue()
assert "proxy" not in retry._session_kwargs
assert "proxies" not in retry._session_kwargs
@pytest.mark.asyncio
async def test_callback_yielding_dict_increments_items(self):
spider = MockSpider()
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
assert engine.stats.items_scraped == 1
assert len(engine.items) == 1
@pytest.mark.asyncio
async def test_callback_yielding_request_enqueues(self):
async def callback(response) -> AsyncGenerator:
yield Request("https://example.com/page2", sid="default")
spider = MockSpider()
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default", callback=callback)
await engine._process_request(request)
assert not engine.scheduler.is_empty
@pytest.mark.asyncio
async def test_callback_yielding_offsite_request_filtered(self):
async def callback(response) -> AsyncGenerator:
yield Request("https://other.com/page", sid="default")
spider = MockSpider(allowed_domains={"example.com"})
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default", callback=callback)
await engine._process_request(request)
assert engine.stats.offsite_requests_count == 1
assert engine.scheduler.is_empty
@pytest.mark.asyncio
async def test_dropped_item_when_on_scraped_item_returns_none(self):
spider = MockSpider(on_scraped_item_fn=lambda item: None)
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default")
await engine._process_request(request)
assert engine.stats.items_dropped == 1
assert engine.stats.items_scraped == 0
assert len(engine.items) == 0
@pytest.mark.asyncio
async def test_callback_exception_calls_on_error(self):
async def bad_callback(response) -> AsyncGenerator:
raise ValueError("callback boom")
yield # noqa: unreachable
spider = MockSpider()
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default", callback=bad_callback)
await engine._process_request(request)
assert len(spider.on_error_calls) == 1
assert isinstance(spider.on_error_calls[0][1], ValueError)
@pytest.mark.asyncio
async def test_proxy_tracked_in_stats(self):
spider = MockSpider()
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default", proxy="http://p:8080")
await engine._process_request(request)
assert "http://p:8080" in engine.stats.proxies
@pytest.mark.asyncio
async def test_proxies_dict_tracked_in_stats(self):
spider = MockSpider()
engine = _make_engine(spider=spider)
proxies = {"http": "http://p:8080", "https": "https://p:8443"}
request = Request("https://example.com", sid="default", proxies=proxies)
await engine._process_request(request)
assert len(engine.stats.proxies) == 1
assert engine.stats.proxies[0] == proxies
@pytest.mark.asyncio
async def test_uses_parse_when_no_callback(self):
items_seen = []
async def custom_parse(response) -> AsyncGenerator:
yield {"from": "custom_parse"}
spider = MockSpider()
spider.parse = custom_parse # type: ignore[assignment]
engine = _make_engine(spider=spider)
request = Request("https://example.com", sid="default")
# No callback set → should use spider.parse
await engine._process_request(request)
assert engine.stats.items_scraped == 1
# ---------------------------------------------------------------------------
# Tests: _task_wrapper
# ---------------------------------------------------------------------------
class TestTaskWrapper:
@pytest.mark.asyncio
async def test_decrements_active_tasks(self):
engine = _make_engine()
engine._active_tasks = 1
request = Request("https://example.com", sid="default")
await engine._task_wrapper(request)
assert engine._active_tasks == 0
@pytest.mark.asyncio
async def test_decrements_even_on_error(self):
spider = MockSpider()
sm = SessionManager()
sm.add("default", ErrorSession())
engine = CrawlerEngine(spider, sm)
engine._active_tasks = 1
request = Request("https://example.com", sid="default")
await engine._task_wrapper(request)
assert engine._active_tasks == 0
# ---------------------------------------------------------------------------
# Tests: request_pause
# ---------------------------------------------------------------------------
class TestRequestPause:
def test_first_call_sets_pause_requested(self):
engine = _make_engine()
engine.request_pause()
assert engine._pause_requested is True
assert engine._force_stop is False
def test_second_call_sets_force_stop(self):
engine = _make_engine()
engine.request_pause() # first
engine.request_pause() # second
assert engine._pause_requested is True
assert engine._force_stop is True
def test_third_call_after_force_stop_is_noop(self):
engine = _make_engine()
engine.request_pause()
engine.request_pause()
engine.request_pause() # should not raise
assert engine._force_stop is True
# ---------------------------------------------------------------------------
# Tests: checkpoint methods
# ---------------------------------------------------------------------------
class TestCheckpointMethods:
def test_is_checkpoint_time_false_when_disabled(self):
engine = _make_engine() # no crawldir
assert engine._is_checkpoint_time() is False
@pytest.mark.asyncio
async def test_save_and_restore_checkpoint(self):
with tempfile.TemporaryDirectory() as tmpdir:
spider = MockSpider()
engine = _make_engine(spider=spider, crawldir=tmpdir)
# Enqueue a request so snapshot has data
req = Request("https://example.com", sid="default")
engine._normalize_request(req)
await engine.scheduler.enqueue(req)
await engine._save_checkpoint()
# Verify checkpoint file exists
checkpoint_path = Path(tmpdir) / "checkpoint.pkl"
assert checkpoint_path.exists()
@pytest.mark.asyncio
async def test_restore_when_no_checkpoint_returns_false(self):
with tempfile.TemporaryDirectory() as tmpdir:
engine = _make_engine(crawldir=tmpdir)
result = await engine._restore_from_checkpoint()
assert result is False
@pytest.mark.asyncio
async def test_restore_from_checkpoint_raises_when_disabled(self):
engine = _make_engine() # no crawldir → checkpoint disabled
with pytest.raises(RuntimeError):
await engine._restore_from_checkpoint()
# ---------------------------------------------------------------------------
# Tests: crawl
# ---------------------------------------------------------------------------
class TestCrawl:
    """End-to-end tests for CrawlerEngine.crawl() in batch (non-streaming) mode."""
    @pytest.mark.asyncio
    async def test_basic_crawl_returns_stats(self):
        """A minimal crawl returns CrawlStats with at least one request and item."""
        spider = MockSpider()
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert isinstance(stats, CrawlStats)
        assert stats.requests_count >= 1
        assert stats.items_scraped >= 1
    @pytest.mark.asyncio
    async def test_crawl_calls_on_start_and_on_close(self):
        """Lifecycle hooks fire exactly once; a fresh run reports resuming=False."""
        spider = MockSpider()
        engine = _make_engine(spider=spider)
        await engine.crawl()
        assert len(spider.on_start_calls) == 1
        assert spider.on_start_calls[0]["resuming"] is False
        assert spider.on_close_calls == 1
    @pytest.mark.asyncio
    async def test_crawl_sets_stats_timing(self):
        """crawl() records positive, correctly ordered start/end timestamps."""
        spider = MockSpider()
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert stats.start_time > 0
        assert stats.end_time > 0
        assert stats.end_time >= stats.start_time
    @pytest.mark.asyncio
    async def test_crawl_sets_concurrency_stats(self):
        """Spider concurrency settings are mirrored into the stats object."""
        spider = MockSpider(concurrent_requests=16, concurrent_requests_per_domain=4)
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert stats.concurrent_requests == 16
        assert stats.concurrent_requests_per_domain == 4
    @pytest.mark.asyncio
    async def test_crawl_processes_multiple_start_urls(self):
        """Every start URL is fetched and yields exactly one item each."""
        spider = MockSpider()
        urls = ["https://example.com/1", "https://example.com/2", "https://example.com/3"]
        async def multi_start_requests() -> AsyncGenerator[Request, None]:
            for url in urls:
                yield Request(url, sid="default")
        spider.start_requests = multi_start_requests # type: ignore[assignment]
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert stats.requests_count == 3
        assert stats.items_scraped == 3
    @pytest.mark.asyncio
    async def test_crawl_follows_yielded_requests(self):
        """Test that requests yielded from callbacks are processed."""
        call_count = 0
        async def parse_with_follow(response) -> AsyncGenerator:
            nonlocal call_count
            call_count += 1
            # Only the first response spawns a follow-up request.
            if call_count == 1:
                yield Request("https://example.com/page2", sid="default")
            yield {"page": str(response)}
        spider = MockSpider()
        spider.parse = parse_with_follow # type: ignore[assignment]
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert stats.requests_count == 2
        assert stats.items_scraped == 2
    @pytest.mark.asyncio
    async def test_crawl_with_download_delay(self):
        """A configured download delay is reported in stats and does not block the run."""
        spider = MockSpider(download_delay=0.01)
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert stats.download_delay == 0.01
        assert stats.requests_count >= 1
    @pytest.mark.asyncio
    async def test_crawl_filters_offsite_requests(self):
        """Requests outside allowed_domains are counted as offsite, not fetched."""
        async def parse_offsite(response) -> AsyncGenerator:
            yield Request("https://other-domain.com/page", sid="default")
            yield {"url": str(response)}
        spider = MockSpider(allowed_domains={"example.com"})
        spider.parse = parse_offsite # type: ignore[assignment]
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert stats.offsite_requests_count == 1
        assert stats.requests_count == 1 # Only the initial request
    @pytest.mark.asyncio
    async def test_crawl_cleans_up_checkpoint_on_completion(self):
        """A finished (non-paused) crawl removes its checkpoint file from disk."""
        with tempfile.TemporaryDirectory() as tmpdir:
            spider = MockSpider()
            engine = _make_engine(spider=spider, crawldir=tmpdir)
            await engine.crawl()
            checkpoint_path = Path(tmpdir) / "checkpoint.pkl"
            assert not checkpoint_path.exists() # Cleaned up
    @pytest.mark.asyncio
    async def test_crawl_handles_fetch_error_gracefully(self):
        """A session that raises during fetch increments failures and triggers on_error."""
        spider = MockSpider()
        sm = SessionManager()
        sm.add("default", ErrorSession())
        engine = CrawlerEngine(spider, sm)
        stats = await engine.crawl()
        assert stats.failed_requests_count == 1
        assert len(spider.on_error_calls) == 1
    @pytest.mark.asyncio
    async def test_crawl_log_levels_populated(self):
        """The per-level log counter is always present after a crawl."""
        spider = MockSpider()
        engine = _make_engine(spider=spider)
        stats = await engine.crawl()
        assert isinstance(stats.log_levels_counter, dict)
    @pytest.mark.asyncio
    async def test_crawl_resets_state_on_each_run(self):
        """The same engine instance can crawl twice; run state resets in between."""
        spider = MockSpider()
        engine = _make_engine(spider=spider)
        # Run first crawl
        await engine.crawl()
        assert engine.stats.requests_count >= 1
        # Run second crawl - stats should reset
        stats = await engine.crawl()
        # Items are cleared on each crawl
        assert engine.paused is False
# ---------------------------------------------------------------------------
# Tests: items property
# ---------------------------------------------------------------------------
class TestItemsProperty:
    """Behaviour of the engine's `items` accessor."""
    def test_items_returns_item_list(self):
        """The accessor always hands back an ItemList instance."""
        assert isinstance(_make_engine().items, ItemList)
    def test_items_initially_empty(self):
        """Before any crawl has run, nothing is stored."""
        fresh_engine = _make_engine()
        assert len(fresh_engine.items) == 0
    @pytest.mark.asyncio
    async def test_items_populated_after_crawl(self):
        """A completed crawl leaves at least one scraped item behind."""
        crawler = _make_engine()
        await crawler.crawl()
        assert len(crawler.items) >= 1
# ---------------------------------------------------------------------------
# Tests: streaming (__aiter__ / _stream)
# ---------------------------------------------------------------------------
class TestStreaming:
    """Consuming the engine as an async iterator (__aiter__ / _stream)."""
    @pytest.mark.asyncio
    async def test_stream_yields_items(self):
        """Iterating the engine produces at least one dict item."""
        engine = _make_engine(spider=MockSpider())
        collected = [item async for item in engine]
        assert len(collected) >= 1
        assert isinstance(collected[0], dict)
    @pytest.mark.asyncio
    async def test_stream_processes_follow_up_requests(self):
        """Requests yielded mid-stream are crawled and their items streamed too."""
        invocations = 0
        async def follow_once(response) -> AsyncGenerator:
            nonlocal invocations
            invocations += 1
            # Only the first response spawns a follow-up request.
            if invocations == 1:
                yield Request("https://example.com/page2", sid="default")
            yield {"page": invocations}
        streaming_spider = MockSpider()
        streaming_spider.parse = follow_once # type: ignore[assignment]
        engine = _make_engine(spider=streaming_spider)
        collected = [item async for item in engine]
        assert len(collected) == 2
    @pytest.mark.asyncio
    async def test_stream_items_not_stored_in_items_list(self):
        """When streaming, items go to the stream, not to engine._items."""
        engine = _make_engine(spider=MockSpider())
        streamed = [item async for item in engine]
        # Everything went through the async iterator; nothing was buffered.
        assert len(streamed) >= 1
        assert len(engine.items) == 0
# ---------------------------------------------------------------------------
# Tests: pause during crawl
# ---------------------------------------------------------------------------
class TestPauseDuringCrawl:
    """Pause semantics while a crawl is in flight."""
    @pytest.mark.asyncio
    async def test_pause_stops_crawl_gracefully(self):
        """A pending pause request stops the loop once in-flight work drains."""
        processed = 0
        async def slow_parse(response) -> AsyncGenerator:
            nonlocal processed
            processed += 1
            # Yield more requests to keep the crawl going
            if processed <= 2:
                yield Request(f"https://example.com/p{processed + 1}", sid="default")
            yield {"n": processed}
        spider = MockSpider()
        spider.parse = slow_parse # type: ignore[assignment]
        engine = _make_engine(spider=spider)
        # Request pause immediately - the engine will stop as soon as active tasks complete
        engine._pause_requested = True
        stats = await engine.crawl()
        # Should stop without processing everything
        assert engine._running is False
    @pytest.mark.asyncio
    async def test_pause_with_checkpoint_sets_paused(self):
        """Pausing mid-crawl with checkpointing enabled leaves the engine paused."""
        with tempfile.TemporaryDirectory() as tmpdir:
            parse_count = 0
            # NOTE: the closure references `engine`, which is bound below before
            # crawl() ever invokes this callback.
            async def parse_and_pause(response) -> AsyncGenerator:
                nonlocal parse_count
                parse_count += 1
                # Request pause after first request, but yield follow-ups
                if parse_count == 1:
                    engine.request_pause()
                yield Request("https://example.com/p2", sid="default")
                yield {"n": parse_count}
            spider = MockSpider()
            spider.parse = parse_and_pause # type: ignore[assignment]
            engine = _make_engine(spider=spider, crawldir=tmpdir)
            await engine.crawl()
            assert engine.paused is True
    @pytest.mark.asyncio
    async def test_pause_without_checkpoint_does_not_set_paused(self):
        """Without a crawldir there is nothing to resume, so paused stays False."""
        spider = MockSpider()
        engine = _make_engine(spider=spider)
        engine._pause_requested = True
        await engine.crawl()
        assert engine.paused is False
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_engine.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 668,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_request.py | """Tests for the Request class."""
import pickle
import pytest
from scrapling.spiders.request import Request
from scrapling.core._types import Any, Dict, AsyncGenerator
class TestRequestCreation:
    """Request construction and default attribute values."""
    def test_basic_request_creation(self):
        """A URL-only request gets sane defaults everywhere else."""
        req = Request("https://example.com")
        assert req.url == "https://example.com"
        assert req.sid == ""
        assert req.callback is None
        assert req.priority == 0
        assert req.dont_filter is False
        assert req.meta == {}
        assert req._retry_count == 0
        assert req._session_kwargs == {}
    def test_request_with_all_parameters(self):
        """Every constructor argument lands on the matching attribute."""
        async def my_callback(response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
            yield {"test": "data"}
        req = Request(
            url="https://example.com/page",
            sid="my_session",
            callback=my_callback,
            priority=10,
            dont_filter=True,
            meta={"key": "value"},
            _retry_count=2,
            proxy="http://proxy:8080",
            timeout=30,
        )
        assert req.url == "https://example.com/page"
        assert req.sid == "my_session"
        assert req.callback == my_callback
        assert req.priority == 10
        assert req.dont_filter is True
        assert req.meta == {"key": "value"}
        assert req._retry_count == 2
        assert req._session_kwargs == {"proxy": "http://proxy:8080", "timeout": 30}
    def test_request_meta_default_is_empty_dict(self):
        """Each request gets its own meta dict, never a shared default."""
        first = Request("https://example.com")
        second = Request("https://example.com")
        first.meta["key"] = "value"
        assert first.meta == {"key": "value"}
        assert second.meta == {}
class TestRequestProperties:
    """Computed properties: domain extraction and fingerprinting."""
    def test_domain_extraction(self):
        """Only the netloc survives into the domain property."""
        req = Request("https://www.example.com/path/page.html?query=1")
        assert req.domain == "www.example.com"
    def test_domain_with_port(self):
        """A port number stays attached to the domain."""
        assert Request("http://localhost:8080/api").domain == "localhost:8080"
    def test_domain_with_subdomain(self):
        """Nested subdomains are preserved verbatim."""
        assert Request("https://api.v2.example.com/endpoint").domain == "api.v2.example.com"
    def test_fingerprint_returns_bytes(self):
        """Fingerprints are raw SHA1 digests."""
        digest = Request("https://example.com").update_fingerprint()
        assert isinstance(digest, bytes)
        assert len(digest) == 20 # SHA1 produces 20 bytes
    def test_fingerprint_is_deterministic(self):
        """Identical requests hash to identical fingerprints."""
        first = Request("https://example.com", data={"key": "value"})
        second = Request("https://example.com", data={"key": "value"})
        assert first.update_fingerprint() == second.update_fingerprint()
    def test_fingerprint_different_urls(self):
        """Distinct URLs must not produce the same fingerprint."""
        page_one = Request("https://example.com/page1")
        page_two = Request("https://example.com/page2")
        assert page_one.update_fingerprint() != page_two.update_fingerprint()
class TestRequestCopy:
    """Request.copy() semantics."""
    def test_copy_creates_independent_request(self):
        """copy() duplicates every field into a brand-new, independent object."""
        async def callback(response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
            yield None
        source = Request(
            url="https://example.com",
            sid="session",
            callback=callback,
            priority=5,
            dont_filter=True,
            meta={"original": True},
            _retry_count=1,
            proxy="http://proxy:8080",
        )
        clone = source.copy()
        # Field-by-field equality...
        for attr in ("url", "sid", "callback", "priority", "dont_filter",
                     "meta", "_retry_count", "_session_kwargs"):
            assert getattr(clone, attr) == getattr(source, attr)
        # ...but distinct objects, with meta copied rather than shared.
        assert clone is not source
        assert clone.meta is not source.meta
    def test_copy_meta_is_independent(self):
        """Mutating the clone's meta leaves the source untouched."""
        source = Request("https://example.com", meta={"key": "original"})
        clone = source.copy()
        clone.meta["key"] = "modified"
        clone.meta["new_key"] = "new_value"
        assert source.meta == {"key": "original"}
        assert clone.meta == {"key": "modified", "new_key": "new_value"}
class TestRequestComparison:
    """Ordering and equality operators on Request."""
    def test_priority_less_than(self):
        """Lower priority sorts below higher priority."""
        minor = Request("https://example.com/1", priority=1)
        major = Request("https://example.com/2", priority=10)
        assert minor < major
        assert not major < minor
    def test_priority_greater_than(self):
        """Higher priority sorts above lower priority."""
        minor = Request("https://example.com/1", priority=1)
        major = Request("https://example.com/2", priority=10)
        assert major > minor
        assert not minor > major
    def test_equality_by_fingerprint(self):
        """Equality is fingerprint-based, so fingerprints must be generated first."""
        twin_a = Request("https://example.com")
        twin_b = Request("https://example.com")
        other = Request("https://example.com/other")
        for req in (twin_a, twin_b, other):
            req.update_fingerprint()
        assert twin_a == twin_b
        assert twin_a != other
    def test_equality_different_priorities_same_fingerprint(self):
        """Priority plays no part in equality; only the fingerprint matters."""
        low = Request("https://example.com", priority=1)
        high = Request("https://example.com", priority=100)
        low.update_fingerprint()
        high.update_fingerprint()
        assert low == high # Same fingerprint means equal
    def test_comparison_with_non_request(self):
        """All three comparison dunders answer NotImplemented for foreign types."""
        req = Request("https://example.com")
        for dunder in ("__lt__", "__gt__", "__eq__"):
            assert getattr(req, dunder)("not a request") == NotImplemented
class TestRequestStringRepresentation:
    """__str__ and __repr__ output."""
    def test_str_returns_url(self):
        """str() of a request is simply its URL."""
        assert str(Request("https://example.com/page")) == "https://example.com/page"
    def test_repr_without_callback(self):
        """repr includes the class name, URL, priority, and a None callback."""
        text = repr(Request("https://example.com", priority=5))
        for fragment in ("Request", "https://example.com", "priority=5", "callback=None"):
            assert fragment in text
    def test_repr_with_callback(self):
        """A named callback shows up by name in the repr."""
        async def my_custom_callback(response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
            yield None
        text = repr(Request("https://example.com", callback=my_custom_callback))
        assert "callback=my_custom_callback" in text
class TestRequestPickling:
    """Serialization behaviour used by crawl checkpointing."""
    def test_pickle_without_callback(self):
        """A callback-free request round-trips through pickle intact."""
        before = Request(
            url="https://example.com",
            sid="session",
            priority=5,
            meta={"key": "value"},
        )
        after = pickle.loads(pickle.dumps(before))
        assert after.url == before.url
        assert after.sid == before.sid
        assert after.priority == before.priority
        assert after.meta == before.meta
        assert after.callback is None
    def test_pickle_with_callback_stores_name(self):
        """__getstate__ swaps the callback object for its name."""
        async def parse_page(response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
            yield {"data": "test"}
        state = Request("https://example.com", callback=parse_page).__getstate__()
        assert state["_callback_name"] == "parse_page"
        assert state["callback"] is None
    def test_pickle_with_none_callback(self):
        """A None callback serializes as a None name."""
        state = Request("https://example.com", callback=None).__getstate__()
        assert state["_callback_name"] is None
        assert state["callback"] is None
    def test_setstate_stores_callback_name(self):
        """__setstate__ keeps the stored callback name for later restoration."""
        req = Request("https://example.com")
        req.__setstate__({
            "url": "https://example.com",
            "sid": "",
            "callback": None,
            "priority": 0,
            "dont_filter": False,
            "meta": {},
            "_retry_count": 0,
            "_session_kwargs": {},
            "_callback_name": "custom_parse",
        })
        assert hasattr(req, "_callback_name")
        assert req._callback_name == "custom_parse"
    def test_pickle_roundtrip_preserves_session_kwargs(self):
        """Session kwargs (proxy/timeout/headers) survive a pickle round-trip."""
        before = Request(
            "https://example.com",
            proxy="http://proxy:8080",
            timeout=30,
            headers={"User-Agent": "test"},
        )
        after = pickle.loads(pickle.dumps(before))
        assert after._session_kwargs == {
            "proxy": "http://proxy:8080",
            "timeout": 30,
            "headers": {"User-Agent": "test"},
        }
class TestRequestRestoreCallback:
    """Re-binding pickled callback names to live spider methods."""
    def test_restore_callback_from_spider(self):
        """A stored name resolves to the matching spider method."""
        class MockSpider:
            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None
            async def parse_detail(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield {"detail": True}
        owner = MockSpider()
        req = Request("https://example.com")
        req._callback_name = "parse_detail"
        req._restore_callback(owner) # type: ignore[arg-type]
        assert req.callback == owner.parse_detail
        assert not hasattr(req, "_callback_name")
    def test_restore_callback_falls_back_to_parse(self):
        """An unknown name degrades to the spider's default parse method."""
        class MockSpider:
            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None
        owner = MockSpider()
        req = Request("https://example.com")
        req._callback_name = "nonexistent_method"
        req._restore_callback(owner) # type: ignore[arg-type]
        assert req.callback == owner.parse
        assert not hasattr(req, "_callback_name")
    def test_restore_callback_with_none_name(self):
        """A None name is cleaned up without assigning a callback."""
        class MockSpider:
            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None
        req = Request("https://example.com")
        req._callback_name = None
        req._restore_callback(MockSpider()) # type: ignore[arg-type]
        # The transient attribute must be gone afterwards.
        assert not hasattr(req, "_callback_name")
    def test_restore_callback_without_callback_name_attr(self):
        """A request that never had _callback_name is tolerated silently."""
        class MockSpider:
            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None
        # No _callback_name set at all; the call must simply not raise.
        Request("https://example.com")._restore_callback(MockSpider()) # type: ignore[arg-type]
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_request.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_result.py | """Tests for the result module (ItemList, CrawlStats, CrawlResult)."""
import json
import tempfile
from pathlib import Path
import pytest
from scrapling.spiders.result import ItemList, CrawlStats, CrawlResult
class TestItemList:
    """ItemList behaviour: list semantics plus JSON/JSONL export."""
    def test_itemlist_is_list(self):
        """ItemList must remain a genuine list subclass."""
        assert isinstance(ItemList(), list)
    def test_itemlist_basic_operations(self):
        """append/len/indexing behave exactly like a plain list."""
        bag = ItemList()
        bag.append({"id": 1})
        bag.append({"id": 2})
        assert len(bag) == 2
        assert bag[0] == {"id": 1}
    def test_to_json_creates_file(self):
        """to_json writes a parseable JSON array of all items."""
        bag = ItemList()
        bag.append({"name": "test", "value": 123})
        bag.append({"name": "test2", "value": 456})
        with tempfile.TemporaryDirectory() as tmpdir:
            target = Path(tmpdir) / "output.json"
            bag.to_json(target)
            assert target.exists()
            payload = json.loads(target.read_text())
            assert len(payload) == 2
            assert payload[0]["name"] == "test"
    def test_to_json_creates_parent_directory(self):
        """Missing intermediate directories are created on demand."""
        bag = ItemList()
        bag.append({"data": "test"})
        with tempfile.TemporaryDirectory() as tmpdir:
            target = Path(tmpdir) / "nested" / "dirs" / "output.json"
            bag.to_json(target)
            assert target.exists()
    def test_to_json_with_indent(self):
        """indent=True produces pretty-printed output."""
        bag = ItemList()
        bag.append({"key": "value"})
        with tempfile.TemporaryDirectory() as tmpdir:
            target = Path(tmpdir) / "output.json"
            bag.to_json(target, indent=True)
            # Pretty-printed JSON necessarily spans multiple lines.
            assert "\n" in target.read_text()
    def test_to_jsonl_creates_file(self):
        """to_jsonl writes one valid JSON object per item."""
        bag = ItemList()
        bag.append({"id": 1, "name": "first"})
        bag.append({"id": 2, "name": "second"})
        bag.append({"id": 3, "name": "third"})
        with tempfile.TemporaryDirectory() as tmpdir:
            target = Path(tmpdir) / "output.jsonl"
            bag.to_jsonl(target)
            assert target.exists()
            lines = target.read_text().strip().split("\n")
            assert len(lines) == 3
            # Every line must parse on its own and carry both fields.
            for line in lines:
                record = json.loads(line)
                assert "id" in record
                assert "name" in record
    def test_to_jsonl_one_object_per_line(self):
        """Line N of the output corresponds to item N, in order."""
        bag = ItemList()
        bag.append({"line": 1})
        bag.append({"line": 2})
        with tempfile.TemporaryDirectory() as tmpdir:
            target = Path(tmpdir) / "output.jsonl"
            bag.to_jsonl(target)
            lines = target.read_text().strip().split("\n")
            assert json.loads(lines[0])["line"] == 1
            assert json.loads(lines[1])["line"] == 2
class TestCrawlStats:
    """CrawlStats counters, derived metrics, and serialization."""
    def test_default_values(self):
        """A fresh CrawlStats starts with every counter zeroed."""
        fresh = CrawlStats()
        assert fresh.requests_count == 0
        assert fresh.concurrent_requests == 0
        assert fresh.failed_requests_count == 0
        assert fresh.response_bytes == 0
        assert fresh.items_scraped == 0
        assert fresh.items_dropped == 0
        assert fresh.start_time == 0.0
        assert fresh.end_time == 0.0
        assert fresh.custom_stats == {}
        assert fresh.response_status_count == {}
        assert fresh.proxies == []
    def test_elapsed_seconds(self):
        """elapsed_seconds is simply end_time minus start_time."""
        assert CrawlStats(start_time=100.0, end_time=150.0).elapsed_seconds == 50.0
    def test_requests_per_second(self):
        """Throughput divides request count by elapsed time."""
        timed = CrawlStats(requests_count=100, start_time=0.0, end_time=10.0)
        assert timed.requests_per_second == 10.0
    def test_requests_per_second_zero_elapsed(self):
        """Zero elapsed time yields zero throughput, not a division error."""
        instant = CrawlStats(requests_count=100, start_time=0.0, end_time=0.0)
        assert instant.requests_per_second == 0.0
    def test_increment_status(self):
        """Status codes are tallied under 'status_<code>' keys."""
        tracker = CrawlStats()
        for code in (200, 200, 404):
            tracker.increment_status(code)
        assert tracker.response_status_count == {"status_200": 2, "status_404": 1}
    def test_increment_response_bytes(self):
        """Byte counts accumulate both globally and per domain."""
        tracker = CrawlStats()
        tracker.increment_response_bytes("example.com", 1000)
        tracker.increment_response_bytes("example.com", 500)
        tracker.increment_response_bytes("other.com", 2000)
        assert tracker.response_bytes == 3500
        assert tracker.domains_response_bytes == {
            "example.com": 1500,
            "other.com": 2000,
        }
    def test_increment_requests_count(self):
        """Request counts accumulate both globally and per session id."""
        tracker = CrawlStats()
        for session in ("session1", "session1", "session2"):
            tracker.increment_requests_count(session)
        assert tracker.requests_count == 3
        assert tracker.sessions_requests_count == {"session1": 2, "session2": 1}
    def test_to_dict(self):
        """to_dict exposes raw counters plus the derived metrics."""
        snapshot = CrawlStats(
            items_scraped=10,
            items_dropped=2,
            requests_count=15,
            start_time=0.0,
            end_time=5.0,
        )
        snapshot.increment_status(200)
        dumped = snapshot.to_dict()
        assert dumped["items_scraped"] == 10
        assert dumped["items_dropped"] == 2
        assert dumped["requests_count"] == 15
        assert dumped["elapsed_seconds"] == 5.0
        assert dumped["requests_per_second"] == 3.0
        assert dumped["response_status_count"] == {"status_200": 1}
    def test_custom_stats(self):
        """custom_stats is a free-form dict included in to_dict output."""
        tracker = CrawlStats()
        tracker.custom_stats["my_metric"] = 42
        tracker.custom_stats["another"] = "value"
        assert tracker.custom_stats["my_metric"] == 42
        assert tracker.to_dict()["custom_stats"]["my_metric"] == 42
class TestCrawlResult:
    """CrawlResult container semantics."""
    def test_basic_creation(self):
        """Stats, items, and the default paused flag are all stored."""
        items = ItemList()
        items.extend({"id": i} for i in range(5))
        outcome = CrawlResult(stats=CrawlStats(items_scraped=5), items=items)
        assert outcome.stats.items_scraped == 5
        assert len(outcome.items) == 5
        assert outcome.paused is False
    def test_completed_property_true_when_not_paused(self):
        """A non-paused result reports itself as completed."""
        outcome = CrawlResult(stats=CrawlStats(), items=ItemList(), paused=False)
        assert outcome.completed is True
    def test_completed_property_false_when_paused(self):
        """A paused result is, by definition, not completed."""
        outcome = CrawlResult(stats=CrawlStats(), items=ItemList(), paused=True)
        assert outcome.completed is False
    def test_len_returns_item_count(self):
        """len(result) delegates to the item list."""
        items = ItemList()
        items.extend({"id": i} for i in range(10))
        assert len(CrawlResult(stats=CrawlStats(), items=items)) == 10
    def test_iter_yields_items(self):
        """Iterating the result walks the items in insertion order."""
        items = ItemList()
        items.extend([{"id": 1}, {"id": 2}, {"id": 3}])
        outcome = CrawlResult(stats=CrawlStats(), items=items)
        assert list(outcome) == [{"id": 1}, {"id": 2}, {"id": 3}]
    def test_result_with_stats(self):
        """Derived stats remain available through the result wrapper."""
        stats = CrawlStats(
            requests_count=100,
            items_scraped=50,
            failed_requests_count=5,
            start_time=0.0,
            end_time=10.0,
        )
        outcome = CrawlResult(stats=stats, items=ItemList())
        assert outcome.stats.requests_count == 100
        assert outcome.stats.items_scraped == 50
        assert outcome.stats.requests_per_second == 10.0
class TestCrawlResultIntegration:
    """Realistic end-to-end use of the result classes together."""
    def test_full_workflow(self):
        """Simulate a small crawl and verify every derived figure."""
        stats = CrawlStats(start_time=1000.0)
        # Ten successful responses from one session, 5 KB each.
        for _ in range(10):
            stats.increment_requests_count("default")
            stats.increment_status(200)
            stats.increment_response_bytes("example.com", 5000)
        # A couple of failures and one block along the way.
        stats.failed_requests_count = 2
        stats.blocked_requests_count = 1
        # Eight items actually scraped.
        items = ItemList()
        for i in range(8):
            items.append({"product_id": i, "name": f"Product {i}"})
            stats.items_scraped += 1
        stats.end_time = 1005.0
        outcome = CrawlResult(stats=stats, items=items, paused=False)
        assert outcome.completed is True
        assert len(outcome) == 8
        assert outcome.stats.requests_count == 10
        assert outcome.stats.requests_per_second == 2.0
        assert outcome.stats.response_bytes == 50000
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_result.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_scheduler.py | """Tests for the Scheduler class."""
import pytest
from scrapling.spiders.request import Request
from scrapling.spiders.scheduler import Scheduler
from scrapling.spiders.checkpoint import CheckpointData
class TestSchedulerInit:
    """Scheduler construction state."""
    def test_scheduler_starts_empty(self):
        """A brand-new scheduler has no queued requests."""
        fresh = Scheduler()
        assert len(fresh) == 0
        assert fresh.is_empty is True
class TestSchedulerEnqueue:
    """Enqueue behaviour, including fingerprint-based duplicate filtering."""
    @pytest.mark.asyncio
    async def test_enqueue_single_request(self):
        """Accepting one request makes the queue non-empty."""
        queue = Scheduler()
        accepted = await queue.enqueue(Request("https://example.com"))
        assert accepted is True
        assert len(queue) == 1
        assert queue.is_empty is False
    @pytest.mark.asyncio
    async def test_enqueue_multiple_requests(self):
        """Distinct URLs all land in the queue."""
        queue = Scheduler()
        for n in range(5):
            await queue.enqueue(Request(f"https://example.com/{n}"))
        assert len(queue) == 5
    @pytest.mark.asyncio
    async def test_enqueue_duplicate_filtered(self):
        """A second request with the same fingerprint is rejected by default."""
        queue = Scheduler()
        first_accepted = await queue.enqueue(Request("https://example.com", sid="s1"))
        # Same URL and session => same fingerprint.
        second_accepted = await queue.enqueue(Request("https://example.com", sid="s1"))
        assert first_accepted is True
        assert second_accepted is False # Duplicate filtered
        assert len(queue) == 1
    @pytest.mark.asyncio
    async def test_enqueue_duplicate_allowed_with_dont_filter(self):
        """dont_filter=True bypasses fingerprint deduplication."""
        queue = Scheduler()
        first_accepted = await queue.enqueue(Request("https://example.com", sid="s1"))
        second_accepted = await queue.enqueue(
            Request("https://example.com", sid="s1", dont_filter=True)
        )
        assert first_accepted is True
        assert second_accepted is True
        assert len(queue) == 2
    @pytest.mark.asyncio
    async def test_enqueue_different_methods_not_duplicate(self):
        """GET and POST to the same URL fingerprint differently."""
        queue = Scheduler()
        get_accepted = await queue.enqueue(Request("https://example.com", method="GET"))
        post_accepted = await queue.enqueue(Request("https://example.com", method="POST"))
        assert get_accepted is True
        assert post_accepted is True
        assert len(queue) == 2
class TestSchedulerDequeue:
    """Dequeue ordering and bookkeeping."""
    @pytest.mark.asyncio
    async def test_dequeue_returns_request(self):
        """What goes in comes back out."""
        queue = Scheduler()
        queued = Request("https://example.com")
        await queue.enqueue(queued)
        popped = await queue.dequeue()
        assert popped.url == queued.url
    @pytest.mark.asyncio
    async def test_dequeue_respects_priority_order(self):
        """Requests come out highest-priority first."""
        queue = Scheduler()
        await queue.enqueue(Request("https://example.com/low", priority=1))
        await queue.enqueue(Request("https://example.com/high", priority=10))
        await queue.enqueue(Request("https://example.com/medium", priority=5))
        expected_order = [
            "https://example.com/high",
            "https://example.com/medium",
            "https://example.com/low",
        ]
        for expected_url in expected_order:
            assert (await queue.dequeue()).url == expected_url
    @pytest.mark.asyncio
    async def test_dequeue_fifo_for_same_priority(self):
        """Ties in priority are broken by insertion order (FIFO)."""
        queue = Scheduler()
        for n in range(3):
            await queue.enqueue(Request(f"https://example.com/{n}", priority=5))
        for n in range(3):
            assert (await queue.dequeue()).url == f"https://example.com/{n}"
    @pytest.mark.asyncio
    async def test_dequeue_updates_length(self):
        """Each dequeue shrinks the queue until it is empty again."""
        queue = Scheduler()
        await queue.enqueue(Request("https://example.com/1"))
        await queue.enqueue(Request("https://example.com/2"))
        assert len(queue) == 2
        await queue.dequeue()
        assert len(queue) == 1
        await queue.dequeue()
        assert len(queue) == 0
        assert queue.is_empty is True
class TestSchedulerSnapshot:
    """Snapshotting pending state for checkpoint persistence."""
    @pytest.mark.asyncio
    async def test_snapshot_empty_scheduler(self):
        """An empty scheduler snapshots to an empty list and an empty set."""
        pending, fingerprints = Scheduler().snapshot()
        assert pending == []
        assert fingerprints == set()
    @pytest.mark.asyncio
    async def test_snapshot_captures_pending_requests(self):
        """All queued requests appear, ordered by descending priority."""
        queue = Scheduler()
        await queue.enqueue(Request("https://example.com/1", priority=5))
        await queue.enqueue(Request("https://example.com/2", priority=10))
        await queue.enqueue(Request("https://example.com/3", priority=1))
        pending, _ = queue.snapshot()
        assert len(pending) == 3
        # Highest priority first (the queue stores negated priorities).
        assert pending[0].url == "https://example.com/2" # priority 10
        assert pending[1].url == "https://example.com/1" # priority 5
        assert pending[2].url == "https://example.com/3" # priority 1
    @pytest.mark.asyncio
    async def test_snapshot_captures_seen_set(self):
        """The seen set holds one raw SHA1 digest per enqueued request."""
        queue = Scheduler()
        await queue.enqueue(Request("https://example.com/1"))
        await queue.enqueue(Request("https://example.com/2"))
        _, fingerprints = queue.snapshot()
        assert len(fingerprints) == 2
        for digest in fingerprints:
            assert isinstance(digest, bytes)
            assert len(digest) == 20 # SHA1 produces 20 bytes
    @pytest.mark.asyncio
    async def test_snapshot_returns_copies(self):
        """Snapshots are defensive copies, not live views."""
        queue = Scheduler()
        await queue.enqueue(Request("https://example.com"))
        pending, fingerprints = queue.snapshot()
        # Mutate the snapshot; the scheduler must be unaffected.
        pending.append(Request("https://modified.com"))
        fingerprints.add(b"new_fingerprint_bytes")
        untouched_pending, untouched_seen = queue.snapshot()
        assert len(untouched_pending) == 1
        assert b"new_fingerprint_bytes" not in untouched_seen
    @pytest.mark.asyncio
    async def test_snapshot_excludes_dequeued_requests(self):
        """Pending requests shrink after dequeue; seen fingerprints do not."""
        queue = Scheduler()
        await queue.enqueue(Request("https://example.com/1"))
        await queue.enqueue(Request("https://example.com/2"))
        await queue.enqueue(Request("https://example.com/3"))
        await queue.dequeue()
        pending, fingerprints = queue.snapshot()
        # Only two requests still await processing...
        assert len(pending) == 2
        # ...but dedup tracking remembers all three.
        assert len(fingerprints) == 3
class TestSchedulerRestore:
    """Exercise Scheduler.restore() from checkpoint data."""

    @pytest.mark.asyncio
    async def test_restore_requests(self):
        """Restoring re-queues every checkpointed request."""
        scheduler = Scheduler()
        data = CheckpointData(
            requests=[
                Request("https://example.com/1", priority=10),
                Request("https://example.com/2", priority=5),
            ],
            seen={b"fp1_bytes_padded!", b"fp2_bytes_padded!", b"fp3_bytes_padded!"},
        )
        scheduler.restore(data)
        assert len(scheduler) == 2

    @pytest.mark.asyncio
    async def test_restore_seen_set(self):
        """Restoring installs the checkpointed fingerprint set."""
        scheduler = Scheduler()
        fingerprints = {b"fp1_bytes_here_pad", b"fp2_bytes_here_pad"}  # Bytes fingerprints
        scheduler.restore(CheckpointData(requests=[], seen=fingerprints))
        # Round-trip through snapshot to observe the restored state.
        _, seen = scheduler.snapshot()
        assert seen == {b"fp1_bytes_here_pad", b"fp2_bytes_here_pad"}

    @pytest.mark.asyncio
    async def test_restore_maintains_priority_order(self):
        """Restored requests dequeue highest-priority first."""
        scheduler = Scheduler()
        # Checkpoint data is expected to already be priority-sorted.
        scheduler.restore(
            CheckpointData(
                requests=[
                    Request("https://example.com/high", priority=10),
                    Request("https://example.com/low", priority=1),
                ],
                seen=set(),
            )
        )
        assert (await scheduler.dequeue()).url == "https://example.com/high"
        assert (await scheduler.dequeue()).url == "https://example.com/low"

    @pytest.mark.asyncio
    async def test_restore_empty_checkpoint(self):
        """Restoring an empty checkpoint leaves the scheduler empty."""
        scheduler = Scheduler()
        scheduler.restore(CheckpointData(requests=[], seen=set()))
        assert len(scheduler) == 0
        assert scheduler.is_empty is True
class TestSchedulerIntegration:
    """End-to-end checkpoint roundtrips through the Scheduler."""

    @pytest.mark.asyncio
    async def test_snapshot_and_restore_roundtrip(self):
        """A restored scheduler must dequeue exactly like the original."""
        original = Scheduler()
        await original.enqueue(Request("https://example.com/1", sid="s1", priority=10))
        await original.enqueue(Request("https://example.com/2", sid="s1", priority=5))
        await original.enqueue(Request("https://example.com/3", sid="s2", priority=7))
        pending, seen = original.snapshot()
        restored = Scheduler()
        restored.restore(CheckpointData(requests=pending, seen=seen))
        assert len(restored) == len(original)
        # Both schedulers must now agree on every remaining request.
        for _ in range(3):
            expected = await original.dequeue()
            actual = await restored.dequeue()
            assert actual.url == expected.url
            assert actual.priority == expected.priority

    @pytest.mark.asyncio
    async def test_partial_processing_then_checkpoint(self):
        """After processing 2 of 5 requests the snapshot shows 3 pending, 5 seen."""
        scheduler = Scheduler()
        for idx in range(5):
            await scheduler.enqueue(Request(f"https://example.com/{idx}"))
        await scheduler.dequeue()
        await scheduler.dequeue()
        pending, seen = scheduler.snapshot()
        assert len(pending) == 3
        assert len(seen) == 5

    @pytest.mark.asyncio
    async def test_deduplication_after_restore(self):
        """The restored seen set keeps filtering duplicate requests."""
        scheduler = Scheduler()
        await scheduler.enqueue(Request("https://example.com", sid="s1"))
        pending, seen = scheduler.snapshot()
        new_scheduler = Scheduler()
        new_scheduler.restore(CheckpointData(requests=pending, seen=seen))
        accepted = await new_scheduler.enqueue(Request("https://example.com", sid="s1"))
        # The duplicate is rejected based on the restored fingerprints.
        assert accepted is False
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_scheduler.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_session.py | """Tests for the SessionManager class."""
from scrapling.core._types import Any
import pytest
from scrapling.spiders.session import SessionManager
class MockSession:  # type: ignore[type-arg]
    """Stand-in session that records lifecycle events instead of doing I/O."""

    def __init__(self, name: str = "mock"):
        self.name = name
        # Lifecycle flags inspected by the tests below.
        self._is_alive = False
        self._started = False
        self._closed = False

    async def __aenter__(self):
        self._started = True
        self._is_alive = True
        return self

    async def __aexit__(self, *args):
        self._closed = True
        self._is_alive = False

    async def fetch(self, url: str, **kwargs):
        # Deliberate no-op: network access is never exercised in unit tests.
        pass
class TestSessionManagerInit:
    """SessionManager construction defaults."""

    def test_manager_starts_empty(self):
        """A fresh manager holds zero sessions."""
        assert len(SessionManager()) == 0

    def test_manager_no_default_session_when_empty(self):
        """default_session_id is unavailable until a session is registered."""
        manager = SessionManager()
        with pytest.raises(RuntimeError, match="No sessions registered"):
            _ = manager.default_session_id
class TestSessionManagerAdd:
    """SessionManager.add() registration behavior."""

    def test_add_single_session(self):
        """One add yields one registered, discoverable session."""
        mgr = SessionManager()
        mgr.add("test", MockSession())
        assert len(mgr) == 1
        assert "test" in mgr
        assert mgr.session_ids == ["test"]

    def test_first_session_becomes_default(self):
        """The first registered session is implicitly the default."""
        mgr = SessionManager()
        mgr.add("first", MockSession())
        assert mgr.default_session_id == "first"

    def test_add_multiple_sessions(self):
        """Several sessions can be registered under distinct IDs."""
        mgr = SessionManager()
        for num in (1, 2, 3):
            mgr.add(f"session{num}", MockSession(f"s{num}"))
        assert len(mgr) == 3
        for sid in ("session1", "session2", "session3"):
            assert sid in mgr

    def test_explicit_default_session(self):
        """default=True overrides the implicit first-added default."""
        mgr = SessionManager()
        mgr.add("first", MockSession())
        mgr.add("second", MockSession(), default=True)
        assert mgr.default_session_id == "second"

    def test_add_duplicate_id_raises(self):
        """Re-using a session ID is rejected."""
        mgr = SessionManager()
        mgr.add("test", MockSession())
        with pytest.raises(ValueError, match="already registered"):
            mgr.add("test", MockSession())

    def test_add_returns_self_for_chaining(self):
        """add() returns the manager itself (fluent interface)."""
        mgr = SessionManager()
        assert mgr.add("test", MockSession()) is mgr

    def test_method_chaining(self):
        """Multiple adds can be chained in one expression."""
        mgr = SessionManager()
        mgr.add("s1", MockSession()).add("s2", MockSession()).add("s3", MockSession())
        assert len(mgr) == 3

    def test_add_lazy_session(self):
        """lazy=True registers the session and tracks it as lazy."""
        mgr = SessionManager()
        mgr.add("lazy", MockSession(), lazy=True)
        assert "lazy" in mgr
        assert "lazy" in mgr._lazy_sessions
class TestSessionManagerRemove:
    """SessionManager.remove() and .pop() behavior."""

    def test_remove_session(self):
        """Removing a session deregisters it completely."""
        mgr = SessionManager()
        mgr.add("test", MockSession())
        mgr.remove("test")
        assert "test" not in mgr
        assert len(mgr) == 0

    def test_remove_nonexistent_raises(self):
        """Removing an unknown ID raises KeyError."""
        mgr = SessionManager()
        with pytest.raises(KeyError, match="not found"):
            mgr.remove("nonexistent")

    def test_pop_returns_session(self):
        """pop() hands back the exact session object it removes."""
        mgr = SessionManager()
        session = MockSession("original")
        mgr.add("test", session)
        assert mgr.pop("test") is session
        assert "test" not in mgr

    def test_remove_default_updates_default(self):
        """Removing the default session promotes another one."""
        mgr = SessionManager()
        mgr.add("first", MockSession())
        mgr.add("second", MockSession())
        assert mgr.default_session_id == "first"
        mgr.remove("first")
        assert mgr.default_session_id == "second"

    def test_remove_lazy_session_cleans_up(self):
        """Removing a lazy session also clears its lazy-tracking entry."""
        mgr = SessionManager()
        mgr.add("lazy", MockSession(), lazy=True)
        mgr.remove("lazy")
        assert "lazy" not in mgr._lazy_sessions
class TestSessionManagerGet:
    """SessionManager.get() lookup behavior."""

    def test_get_existing_session(self):
        """get() returns the exact registered session object."""
        mgr = SessionManager()
        session = MockSession("test")
        mgr.add("test", session)
        assert mgr.get("test") is session

    def test_get_nonexistent_raises_with_available(self):
        """A missing ID raises KeyError listing the available sessions."""
        mgr = SessionManager()
        mgr.add("session1", MockSession())
        mgr.add("session2", MockSession())
        with pytest.raises(KeyError, match="Available:"):
            mgr.get("nonexistent")
class TestSessionManagerContains:
    """Membership (`in`) checks on SessionManager."""

    def test_contains_existing(self):
        """A registered ID is reported as contained."""
        mgr = SessionManager()
        mgr.add("test", MockSession())
        assert "test" in mgr

    def test_not_contains_missing(self):
        """An unregistered ID is reported as absent."""
        mgr = SessionManager()
        mgr.add("test", MockSession())
        assert "other" not in mgr
class TestSessionManagerAsyncContext:
    """SessionManager start/close lifecycle and async-context support."""

    @pytest.mark.asyncio
    async def test_start_activates_sessions(self):
        """start() enters every non-lazy session and marks the manager started."""
        mgr = SessionManager()
        session = MockSession()
        mgr.add("test", session)
        await mgr.start()
        assert session._is_alive is True
        assert mgr._started is True

    @pytest.mark.asyncio
    async def test_start_skips_lazy_sessions(self):
        """Lazy sessions stay inactive after start()."""
        mgr = SessionManager()
        eager, lazy = MockSession("eager"), MockSession("lazy")
        mgr.add("eager", eager)
        mgr.add("lazy", lazy, lazy=True)
        await mgr.start()
        assert eager._is_alive is True
        assert lazy._is_alive is False

    @pytest.mark.asyncio
    async def test_close_deactivates_sessions(self):
        """close() exits every session and clears the started flag."""
        mgr = SessionManager()
        session = MockSession()
        mgr.add("test", session)
        await mgr.start()
        assert session._is_alive is True
        await mgr.close()
        assert session._is_alive is False
        assert mgr._started is False

    @pytest.mark.asyncio
    async def test_async_context_manager(self):
        """`async with` starts on entry and closes on exit."""
        mgr = SessionManager()
        session = MockSession()
        mgr.add("test", session)
        async with mgr:
            assert session._is_alive is True
        assert session._is_alive is False

    @pytest.mark.asyncio
    async def test_start_idempotent(self):
        """Calling start() twice neither raises nor double-starts sessions."""
        mgr = SessionManager()
        session = MockSession()
        mgr.add("test", session)
        await mgr.start()
        await mgr.start()
        assert session._started is True
class TestSessionManagerProperties:
    """SessionManager introspection properties."""

    def test_session_ids_returns_list(self):
        """session_ids is a list covering every registered ID."""
        mgr = SessionManager()
        for sid in ("a", "b", "c"):
            mgr.add(sid, MockSession())
        ids = mgr.session_ids
        assert isinstance(ids, list)
        assert set(ids) == {"a", "b", "c"}

    def test_len_returns_session_count(self):
        """len() tracks the number of registered sessions."""
        mgr = SessionManager()
        assert len(mgr) == 0
        mgr.add("s1", MockSession())
        assert len(mgr) == 1
        mgr.add("s2", MockSession())
        assert len(mgr) == 2
class TestSessionManagerIntegration:
    """Integration-style scenarios combining several SessionManager features."""

    def test_realistic_setup(self):
        """A mixed eager/lazy registration behaves as configured."""
        mgr = SessionManager()
        mgr.add("default", MockSession("default"))
        mgr.add("backup", MockSession("backup"))
        mgr.add("lazy_special", MockSession("special"), lazy=True)
        assert len(mgr) == 3
        assert mgr.default_session_id == "default"
        assert "lazy_special" in mgr._lazy_sessions

    @pytest.mark.asyncio
    async def test_lifecycle_management(self):
        """All sessions go inactive -> active -> inactive across start/close."""
        mgr = SessionManager()
        sessions = [MockSession(f"s{i}") for i in range(3)]
        for i, session in enumerate(sessions):
            mgr.add(f"session{i}", session)
        # Nothing is alive before start().
        assert not any(s._is_alive for s in sessions)
        await mgr.start()
        assert all(s._is_alive for s in sessions)
        await mgr.close()
        assert not any(s._is_alive for s in sessions)
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_session.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/spiders/test_spider.py | """Tests for the Spider class and related components."""
import logging
import tempfile
from pathlib import Path
import pytest
from scrapling.spiders.spider import Spider, SessionConfigurationError, LogCounterHandler, BLOCKED_CODES
from scrapling.spiders.request import Request
from scrapling.spiders.session import SessionManager
from scrapling.spiders.result import CrawlStats
from scrapling.core._types import Any, Dict, AsyncGenerator
class TestLogCounterHandler:
    """Test LogCounterHandler for tracking per-level log counts.

    The original tests rebuilt an identical ``logging.LogRecord`` in every
    method; that construction is factored into ``_record`` so each test
    states only what varies (the level and the emit count).
    """

    @staticmethod
    def _record(level: int) -> logging.LogRecord:
        """Build a minimal LogRecord at *level* for feeding to emit()."""
        return logging.LogRecord(
            name="test",
            level=level,
            pathname="",
            lineno=0,
            msg="test",
            args=(),
            exc_info=None,
        )

    def test_initial_counts_are_zero(self):
        """A fresh handler reports zero for every level."""
        counts = LogCounterHandler().get_counts()
        for level_name in ("debug", "info", "warning", "error", "critical"):
            assert counts[level_name] == 0

    def test_counts_debug_messages(self):
        """Two emitted DEBUG records yield a debug count of 2."""
        handler = LogCounterHandler()
        record = self._record(logging.DEBUG)
        handler.emit(record)
        handler.emit(record)
        assert handler.get_counts()["debug"] == 2

    def test_counts_info_messages(self):
        """An emitted INFO record increments the info count."""
        handler = LogCounterHandler()
        handler.emit(self._record(logging.INFO))
        assert handler.get_counts()["info"] == 1

    def test_counts_warning_messages(self):
        """An emitted WARNING record increments the warning count."""
        handler = LogCounterHandler()
        handler.emit(self._record(logging.WARNING))
        assert handler.get_counts()["warning"] == 1

    def test_counts_error_messages(self):
        """An emitted ERROR record increments the error count."""
        handler = LogCounterHandler()
        handler.emit(self._record(logging.ERROR))
        assert handler.get_counts()["error"] == 1

    def test_counts_critical_messages(self):
        """An emitted CRITICAL record increments the critical count."""
        handler = LogCounterHandler()
        handler.emit(self._record(logging.CRITICAL))
        assert handler.get_counts()["critical"] == 1

    def test_counts_multiple_levels(self):
        """Counts are kept independently per level across mixed emissions."""
        handler = LogCounterHandler()
        levels = [
            logging.DEBUG,
            logging.DEBUG,
            logging.INFO,
            logging.WARNING,
            logging.ERROR,
            logging.ERROR,
            logging.ERROR,
            logging.CRITICAL,
        ]
        for level in levels:
            handler.emit(self._record(level))
        counts = handler.get_counts()
        assert counts["debug"] == 2
        assert counts["info"] == 1
        assert counts["warning"] == 1
        assert counts["error"] == 3
        assert counts["critical"] == 1
class TestBlockedCodes:
    """Sanity checks on the BLOCKED_CODES constant."""

    def test_blocked_codes_contains_expected_values(self):
        """All statuses that commonly signal blocking/throttling are present."""
        expected = (
            401,  # Unauthorized
            403,  # Forbidden
            407,  # Proxy Authentication Required
            429,  # Too Many Requests
            444,  # Connection Closed Without Response (nginx)
            500,  # Internal Server Error
            502,  # Bad Gateway
            503,  # Service Unavailable
            504,  # Gateway Timeout
        )
        for code in expected:
            assert code in BLOCKED_CODES

    def test_blocked_codes_does_not_contain_success(self):
        """Success and redirect statuses are never treated as blocked."""
        for code in (200, 201, 204, 301, 302):
            assert code not in BLOCKED_CODES
class ConcreteSpider(Spider):
    """Concrete spider implementation for testing."""

    # Minimal valid configuration: Spider requires a name and a starting URL.
    name = "test_spider"
    start_urls = ["https://example.com"]

    async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
        # Trivial parse: emit one item recording the response's string form.
        yield {"url": str(response)}
class TestSpiderInit:
    """Spider constructor behavior."""

    def test_spider_requires_name(self):
        """Instantiating a spider that defines no name raises ValueError."""
        class NoNameSpider(Spider):
            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        with pytest.raises(ValueError, match="must have a name"):
            NoNameSpider()

    def test_spider_initializes_logger(self):
        """Each spider gets its own namespaced logger."""
        spider = ConcreteSpider()
        assert spider.logger is not None
        assert spider.logger.name == "scrapling.spiders.test_spider"

    def test_spider_logger_has_log_counter(self):
        """The spider wires a LogCounterHandler into its logger."""
        spider = ConcreteSpider()
        assert spider._log_counter is not None
        assert isinstance(spider._log_counter, LogCounterHandler)

    def test_spider_with_crawldir(self):
        """A crawldir argument is stored as a Path."""
        with tempfile.TemporaryDirectory() as tmpdir:
            assert ConcreteSpider(crawldir=tmpdir).crawldir == Path(tmpdir)

    def test_spider_without_crawldir(self):
        """Without a crawldir argument, none is set."""
        assert ConcreteSpider().crawldir is None

    def test_spider_custom_interval(self):
        """A custom checkpoint interval is honored."""
        assert ConcreteSpider(interval=60.0)._interval == 60.0

    def test_spider_default_interval(self):
        """The checkpoint interval defaults to five minutes."""
        assert ConcreteSpider()._interval == 300.0

    def test_spider_repr(self):
        """repr() mentions both the class and the spider name."""
        text = repr(ConcreteSpider())
        assert "ConcreteSpider" in text
        assert "test_spider" in text
class TestSpiderClassAttributes:
    """Defaults of Spider's tunable class attributes."""

    def test_default_concurrent_requests(self):
        """Global concurrency defaults to 4."""
        assert ConcreteSpider.concurrent_requests == 4

    def test_default_concurrent_requests_per_domain(self):
        """Per-domain concurrency defaults to 0 (disabled)."""
        assert ConcreteSpider.concurrent_requests_per_domain == 0

    def test_default_download_delay(self):
        """No delay between downloads by default."""
        assert ConcreteSpider.download_delay == 0.0

    def test_default_max_blocked_retries(self):
        """Blocked requests are retried up to 3 times by default."""
        assert ConcreteSpider.max_blocked_retries == 3

    def test_default_logging_level(self):
        """Logging defaults to DEBUG."""
        assert ConcreteSpider.logging_level == logging.DEBUG

    def test_default_allowed_domains_empty(self):
        """No domain restriction by default."""
        assert ConcreteSpider.allowed_domains == set()
class TestSpiderSessionConfiguration:
    """Spider.configure_sessions() validation."""

    def test_default_configure_sessions(self):
        """The default implementation registers at least one session."""
        assert len(ConcreteSpider()._session_manager) > 0

    def test_configure_sessions_error_raises_custom_exception(self):
        """Failures inside configure_sessions surface as SessionConfigurationError."""
        class BadSessionSpider(Spider):
            name = "bad_spider"

            def configure_sessions(self, manager: SessionManager) -> None:
                raise RuntimeError("Configuration failed!")

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        with pytest.raises(SessionConfigurationError, match="Configuration failed"):
            BadSessionSpider()

    def test_configure_sessions_no_sessions_raises(self):
        """Leaving the manager empty is a configuration error."""
        class NoSessionSpider(Spider):
            name = "no_session_spider"

            def configure_sessions(self, manager: SessionManager) -> None:
                pass  # Deliberately register nothing.

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        with pytest.raises(SessionConfigurationError, match="did not add any sessions"):
            NoSessionSpider()
class TestSpiderStartRequests:
    """Spider.start_requests() generation."""

    @pytest.mark.asyncio
    async def test_start_requests_yields_from_start_urls(self):
        """One Request is yielded per start_urls entry, in order."""
        class MultiUrlSpider(Spider):
            name = "multi_url"
            start_urls = [
                "https://example.com/1",
                "https://example.com/2",
                "https://example.com/3",
            ]

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        produced = [req async for req in MultiUrlSpider().start_requests()]
        assert [req.url for req in produced] == [
            "https://example.com/1",
            "https://example.com/2",
            "https://example.com/3",
        ]

    @pytest.mark.asyncio
    async def test_start_requests_no_urls_raises(self):
        """An empty start_urls makes start_requests() raise."""
        class NoUrlSpider(Spider):
            name = "no_url"
            start_urls = []

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        spider = NoUrlSpider()
        with pytest.raises(RuntimeError, match="no starting point"):
            async for _ in spider.start_requests():
                pass

    @pytest.mark.asyncio
    async def test_start_requests_uses_default_session(self):
        """Generated requests carry the session manager's default session ID."""
        spider = ConcreteSpider()
        produced = [req async for req in spider.start_requests()]
        assert produced[0].sid == spider._session_manager.default_session_id
class TestSpiderHooks:
    """Default implementations of the Spider lifecycle hooks."""

    @pytest.mark.asyncio
    async def test_on_start_default(self):
        """on_start is a no-op for both fresh and resumed crawls."""
        spider = ConcreteSpider()
        await spider.on_start(resuming=False)
        await spider.on_start(resuming=True)

    @pytest.mark.asyncio
    async def test_on_close_default(self):
        """on_close is a no-op by default."""
        await ConcreteSpider().on_close()

    @pytest.mark.asyncio
    async def test_on_error_default(self):
        """on_error handles (logs) an error without raising."""
        await ConcreteSpider().on_error(Request("https://example.com"), ValueError("test error"))

    @pytest.mark.asyncio
    async def test_on_scraped_item_default_returns_item(self):
        """on_scraped_item passes items through unchanged."""
        item = {"key": "value", "nested": {"a": 1}}
        assert await ConcreteSpider().on_scraped_item(item) == item

    @pytest.mark.asyncio
    async def test_is_blocked_default_checks_status_codes(self):
        """is_blocked is True exactly for statuses in BLOCKED_CODES."""
        class MockResponse:
            def __init__(self, status: int):
                self.status = status

        spider = ConcreteSpider()
        for blocked_status in (403, 429, 503):
            assert await spider.is_blocked(MockResponse(blocked_status)) is True
        for ok_status in (200, 404):
            assert await spider.is_blocked(MockResponse(ok_status)) is False

    @pytest.mark.asyncio
    async def test_retry_blocked_request_default_returns_request(self):
        """retry_blocked_request returns the original request untouched."""
        class MockResponse:
            status = 429

        request = Request("https://example.com", priority=5)
        assert await ConcreteSpider().retry_blocked_request(request, MockResponse()) is request
class TestSpiderPause:
    """Spider.pause() guard behavior."""

    def test_pause_without_engine_raises(self):
        """Pausing with no crawl running raises RuntimeError."""
        with pytest.raises(RuntimeError, match="No active crawl to stop"):
            ConcreteSpider().pause()
class TestSpiderStats:
    """Spider.stats property guard behavior."""

    def test_stats_without_engine_raises(self):
        """Reading stats with no crawl running raises RuntimeError."""
        with pytest.raises(RuntimeError, match="No active crawl"):
            _ = ConcreteSpider().stats
class TestSpiderCustomization:
    """Overriding Spider class attributes in subclasses."""

    def test_custom_concurrent_requests(self):
        """A subclass can raise the global concurrency limit."""
        class CustomSpider(Spider):
            name = "custom"
            concurrent_requests = 32
            start_urls = ["https://example.com"]

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        assert CustomSpider().concurrent_requests == 32

    def test_custom_allowed_domains(self):
        """A subclass can restrict crawling to specific domains."""
        class DomainSpider(Spider):
            name = "domain_spider"
            start_urls = ["https://example.com"]
            allowed_domains = {"example.com", "api.example.com"}

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        spider = DomainSpider()
        assert "example.com" in spider.allowed_domains
        assert "api.example.com" in spider.allowed_domains

    def test_custom_download_delay(self):
        """A subclass can set a per-request download delay."""
        class SlowSpider(Spider):
            name = "slow"
            download_delay = 1.5
            start_urls = ["https://example.com"]

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        assert SlowSpider().download_delay == 1.5
class TestSpiderLogging:
    """Spider logging setup."""

    def test_custom_logging_level(self):
        """A subclass-level logging_level is applied to the logger."""
        class QuietSpider(Spider):
            name = "quiet"
            logging_level = logging.WARNING
            start_urls = ["https://example.com"]

            async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                yield None

        assert QuietSpider().logger.level == logging.WARNING

    def test_log_file_creates_handler(self):
        """Setting log_file attaches exactly one FileHandler."""
        with tempfile.TemporaryDirectory() as tmpdir:
            log_path = Path(tmpdir) / "spider.log"

            class FileLogSpider(Spider):
                name = "file_log"
                log_file = str(log_path)
                start_urls = ["https://example.com"]

                async def parse(self, response) -> AsyncGenerator[Dict[str, Any] | Request | None, None]:
                    yield None

            spider = FileLogSpider()
            file_handlers = [
                handler
                for handler in spider.logger.handlers
                if isinstance(handler, logging.FileHandler)
            ]
            assert len(file_handlers) == 1
            # Close handlers so the temp directory can be removed on Windows too.
            for handler in file_handlers:
                handler.close()

    def test_logger_does_not_propagate(self):
        """The spider logger must not bubble records up to the root logger."""
        assert ConcreteSpider().logger.propagate is False
class TestSessionConfigurationError:
    """Basic contract of the SessionConfigurationError exception type."""

    def test_exception_message(self):
        """The constructor argument becomes the str() of the exception."""
        assert str(SessionConfigurationError("Custom error message")) == "Custom error message"

    def test_exception_is_exception(self):
        """It participates in normal exception handling."""
        assert isinstance(SessionConfigurationError("test"), Exception)
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/spiders/test_spider.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:scrapling/fetchers/stealth_chrome.py | from scrapling.core._types import Unpack
from scrapling.engines._browsers._types import StealthSession
from scrapling.engines.toolbelt.custom import BaseFetcher, Response
from scrapling.engines._browsers._stealth import StealthySession, AsyncStealthySession
class StealthyFetcher(BaseFetcher):
    """A `Fetcher` class type which is a completely stealthy built on top of Chromium.

    It works as real browsers passing almost all online tests/protections with many customization options.
    """

    @classmethod
    def _merge_selector_config(cls, kwargs) -> None:
        """Normalize the `selector_config` entry of *kwargs* in place.

        Falls back to the deprecated `custom_config` key for backward compatibility.
        The deprecated key is *popped* (the original used `.get`, which left it in
        `kwargs` and forwarded it to the session constructor — presumably rejected
        there as an unknown keyword; TODO confirm against StealthySession.__init__).
        """
        legacy_config = kwargs.pop("custom_config", {})  # deprecated alias, must not reach the session
        selector_config = kwargs.get("selector_config", {}) or legacy_config
        if not isinstance(selector_config, dict):
            raise TypeError("Argument `selector_config` must be a dictionary.")
        # Parser defaults first so user-supplied keys win on conflict.
        kwargs["selector_config"] = {**cls._generate_parser_arguments(), **selector_config}

    @classmethod
    def fetch(cls, url: str, **kwargs: Unpack[StealthSession]) -> Response:
        """
        Opens up a browser and do your request based on your chosen options below.

        :param url: Target url.
        :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
            Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
        :param cookies: Set cookies for the next request.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param init_script: An absolute path to a JavaScript file to be executed on page creation for all pages in this session.
        :param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
            rules. Defaults to the system default locale.
        :param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param solve_cloudflare: Solves all types of the Cloudflare's Turnstile/Interstitial challenges before returning the response to you.
        :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
        :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
        :param block_webrtc: Forces WebRTC to respect proxy settings to prevent local IP address leak.
        :param allow_webgl: Enabled by default. Disabling it disables WebGL and WebGL 2.0 support entirely. Disabling WebGL is not recommended as many WAFs now check if WebGL is enabled.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
        :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
        :param user_data_dir: Path to a User Data Directory, which stores browser session data like cookies and local storage. The default is to create a temporary directory.
        :param extra_flags: A list of additional browser flags to pass to the browser on launch.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param additional_args: Additional arguments to be passed to Playwright's context as additional settings, and it takes higher priority than Scrapling's settings.
        :return: A `Response` object.
        """
        cls._merge_selector_config(kwargs)
        with StealthySession(**kwargs) as engine:
            return engine.fetch(url)

    @classmethod
    async def async_fetch(cls, url: str, **kwargs: Unpack[StealthSession]) -> Response:
        """
        Opens up a browser and do your request based on your chosen options below.

        :param url: Target url.
        :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
            Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
        :param cookies: Set cookies for the next request.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param init_script: An absolute path to a JavaScript file to be executed on page creation for all pages in this session.
        :param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
            rules. Defaults to the system default locale.
        :param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param solve_cloudflare: Solves all types of the Cloudflare's Turnstile/Interstitial challenges before returning the response to you.
        :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
        :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
        :param block_webrtc: Forces WebRTC to respect proxy settings to prevent local IP address leak.
        :param allow_webgl: Enabled by default. Disabling it disables WebGL and WebGL 2.0 support entirely. Disabling WebGL is not recommended as many WAFs now check if WebGL is enabled.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
        :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
        :param user_data_dir: Path to a User Data Directory, which stores browser session data like cookies and local storage. The default is to create a temporary directory.
        :param extra_flags: A list of additional browser flags to pass to the browser on launch.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param additional_args: Additional arguments to be passed to Playwright's context as additional settings, and it takes higher priority than Scrapling's settings.
        :return: A `Response` object.
        """
        cls._merge_selector_config(kwargs)
        async with AsyncStealthySession(**kwargs) as engine:
            return await engine.fetch(url)
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/fetchers/stealth_chrome.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
D4Vinci/Scrapling:scrapling/core/_shell_signatures.py | from scrapling.core._types import (
Any,
Dict,
List,
Tuple,
Sequence,
Callable,
Optional,
SetCookieParam,
SelectorWaitStates,
)
# Parameter definitions for shell function signatures (defined once at module level)
# Mirrors TypedDict definitions from _types.py but runtime-accessible for IPython introspection
_REQUESTS_PARAMS = {
    "params": Optional[Dict | List | Tuple],
    "cookies": Any,
    "auth": Optional[Tuple[str, str]],
    "impersonate": Any,
    "http3": Optional[bool],
    "stealthy_headers": Optional[bool],
    "proxies": Any,
    "proxy": Optional[str],
    "proxy_auth": Optional[Tuple[str, str]],
    "timeout": Optional[int | float],
    "headers": Any,
    "retries": Optional[int],
    "retry_delay": Optional[int],
    "follow_redirects": Optional[bool],
    "max_redirects": Optional[int],
    "verify": Optional[bool],
    "cert": Optional[str | Tuple[str, str]],
    "selector_config": Optional[Dict],
}

# Extra parameters accepted only by body-carrying HTTP methods (POST/PUT)
_BODY_PARAMS = {
    "data": Optional[Dict | str],
    "json": Optional[Dict | List],
}

_FETCH_PARAMS = {
    "headless": bool,
    "disable_resources": bool,
    "network_idle": bool,
    "load_dom": bool,
    "wait_selector": Optional[str],
    "wait_selector_state": SelectorWaitStates,
    "cookies": Sequence[SetCookieParam],
    "google_search": bool,
    "wait": int | float,
    "timezone_id": str | None,
    "page_action": Optional[Callable],
    "proxy": Optional[str | Dict[str, str] | Tuple],
    "extra_headers": Optional[Dict[str, str]],
    "timeout": int | float,
    "init_script": Optional[str],
    "user_data_dir": str,
    "selector_config": Optional[Dict],
    "additional_args": Optional[Dict],
    "locale": Optional[str],
    "real_chrome": bool,
    "cdp_url": Optional[str],
    "useragent": Optional[str],
    "extra_flags": Optional[List[str]],
}

# Stealth fetch accepts everything the plain fetch does plus the stealth-only flags.
# Derived from _FETCH_PARAMS (instead of duplicating all 23 entries) so the two
# cannot drift apart — mirroring StealthSession extending PlaywrightSession in _types.py.
_STEALTHY_FETCH_PARAMS = {
    **_FETCH_PARAMS,
    "allow_webgl": bool,
    "hide_canvas": bool,
    "block_webrtc": bool,
    "solve_cloudflare": bool,
}

# Mapping of function names to their parameter definitions
Signatures_map = {
    "get": _REQUESTS_PARAMS,
    "post": {**_REQUESTS_PARAMS, **_BODY_PARAMS},
    "put": {**_REQUESTS_PARAMS, **_BODY_PARAMS},
    "delete": _REQUESTS_PARAMS,
    "fetch": _FETCH_PARAMS,
    "stealthy_fetch": _STEALTHY_FETCH_PARAMS,
}
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/core/_shell_signatures.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/engines/_browsers/_types.py | from io import BytesIO
from curl_cffi.requests import (
ProxySpec,
CookieTypes,
BrowserTypeLiteral,
)
from scrapling.core._types import (
Dict,
List,
Set,
Tuple,
Mapping,
Optional,
Callable,
Sequence,
TypedDict,
TypeAlias,
SetCookieParam,
SelectorWaitStates,
)
from scrapling.engines.toolbelt.proxy_rotation import ProxyRotator
# Type alias for `impersonate` parameter - accepts a single browser or list of browsers
# (a list means the engine picks one of them per request; None disables impersonation)
ImpersonateType: TypeAlias = BrowserTypeLiteral | List[BrowserTypeLiteral] | None
# Types for session initialization
class RequestsSession(TypedDict, total=False):
    """Keyword options accepted when initializing a curl_cffi-based HTTP fetcher session.

    ``total=False``: every key is optional.
    """

    impersonate: ImpersonateType  # browser fingerprint(s) to impersonate
    http3: Optional[bool]
    stealthy_headers: Optional[bool]
    proxies: Optional[ProxySpec]
    proxy: Optional[str]
    proxy_auth: Optional[Tuple[str, str]]  # (username, password)
    proxy_rotator: Optional[ProxyRotator]
    timeout: Optional[int | float]
    headers: Optional[Mapping[str, Optional[str]]]
    retries: Optional[int]
    retry_delay: Optional[int]
    follow_redirects: Optional[bool]
    max_redirects: Optional[int]
    verify: Optional[bool]  # TLS certificate verification
    cert: Optional[str | Tuple[str, str]]  # client certificate: path or (cert, key) pair
    selector_config: Optional[Dict]  # forwarded to the final Selector construction
# Types for GET request method parameters
class GetRequestParams(RequestsSession, total=False):
    """Per-request options for GET: everything a session accepts plus query/auth/cookies."""

    params: Optional[Dict | List | Tuple]  # query-string parameters
    cookies: Optional[CookieTypes]
    auth: Optional[Tuple[str, str]]  # HTTP basic auth as (username, password)
# Types for POST/PUT/DELETE request method parameters
class DataRequestParams(GetRequestParams, total=False):
    """Per-request options for body-carrying methods: GET options plus a request body."""

    data: Optional[Dict[str, str] | List[Tuple] | str | BytesIO | bytes]  # form/raw body
    json: Optional[Dict | List]  # JSON body (alternative to `data`)
# Types for browser session
class PlaywrightSession(TypedDict, total=False):
    """Keyword options accepted when initializing a Playwright-driven browser session.

    ``total=False``: every key is optional.
    """

    max_pages: int  # page-pool size for concurrent tabs
    headless: bool
    disable_resources: bool  # drop heavy resource requests (fonts, images, media, ...)
    network_idle: bool
    load_dom: bool
    wait_selector: Optional[str]
    wait_selector_state: SelectorWaitStates
    cookies: Sequence[SetCookieParam] | None
    google_search: bool  # fake a Google-search referer for the target domain
    wait: int | float  # milliseconds — see the fetcher docstrings
    timezone_id: str | None
    page_action: Optional[Callable]  # automation hook receiving the page object
    proxy: Optional[str | Dict[str, str] | Tuple]
    proxy_rotator: Optional[ProxyRotator]
    extra_headers: Optional[Dict[str, str]]
    timeout: int | float  # milliseconds — see the fetcher docstrings
    init_script: Optional[str]  # absolute path to a JS file run on page creation
    user_data_dir: str
    selector_config: Optional[Dict]  # forwarded to the final Selector construction
    additional_args: Optional[Dict]  # extra Playwright context settings (take priority)
    locale: Optional[str]
    real_chrome: bool  # use a locally installed Chrome instead of bundled Chromium
    cdp_url: Optional[str]  # connect to an existing browser over CDP instead of launching
    useragent: Optional[str]
    extra_flags: Optional[List[str]]  # additional browser launch flags
    blocked_domains: Optional[Set[str]]  # domains (and their subdomains) to block
    retries: int
    retry_delay: int | float
class PlaywrightFetchParams(TypedDict, total=False):
    """Per-request overrides accepted by a browser session's fetch call.

    A subset of PlaywrightSession keys that make sense on a single request.
    """

    load_dom: bool
    wait: int | float  # milliseconds
    network_idle: bool
    google_search: bool
    timeout: int | float  # milliseconds
    disable_resources: bool
    wait_selector: Optional[str]
    page_action: Optional[Callable]
    selector_config: Optional[Dict]
    extra_headers: Optional[Dict[str, str]]
    wait_selector_state: SelectorWaitStates
    blocked_domains: Optional[Set[str]]
    proxy: Optional[str | Dict[str, str]]
class StealthSession(PlaywrightSession, total=False):
    """Stealth-mode session options: everything PlaywrightSession accepts plus anti-fingerprinting flags."""

    allow_webgl: bool  # disabling WebGL is discouraged — many WAFs check for it
    hide_canvas: bool  # add noise to canvas operations against fingerprinting
    block_webrtc: bool  # prevent local IP leaks through WebRTC
    solve_cloudflare: bool  # auto-solve Cloudflare Turnstile/Interstitial challenges
class StealthFetchParams(PlaywrightFetchParams, total=False):
    """Per-request overrides for stealth sessions; adds the Cloudflare-solving toggle."""

    solve_cloudflare: bool
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/_browsers/_types.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:tests/fetchers/test_impersonate_list.py | """Test suite for list-based impersonate parameter functionality."""
import pytest
import pytest_httpbin
from unittest.mock import patch, MagicMock
from scrapling import Fetcher
from scrapling.fetchers import FetcherSession
from scrapling.engines.static import _select_random_browser
class TestRandomBrowserSelection:
    """Tests for the `_select_random_browser` helper."""

    def test_select_random_browser_with_single_string(self):
        """A plain browser string passes through unchanged."""
        assert _select_random_browser("chrome") == "chrome"

    def test_select_random_browser_with_none(self):
        """`None` passes through unchanged."""
        assert _select_random_browser(None) is None

    def test_select_random_browser_with_list(self):
        """A pick from a list is always one of the list's members."""
        candidates = ["chrome", "firefox", "safari"]
        assert _select_random_browser(candidates) in candidates

    def test_select_random_browser_with_empty_list(self):
        """An empty list yields `None`."""
        assert _select_random_browser([]) is None

    def test_select_random_browser_with_single_item_list(self):
        """A one-element list yields its sole element."""
        assert _select_random_browser(["chrome"]) == "chrome"
@pytest_httpbin.use_class_based_httpbin
class TestFetcherWithImpersonateList:
    """Fetcher requests where `impersonate` is given as a list of browsers."""

    @pytest.fixture(autouse=True)
    def setup_urls(self, httpbin):
        """Expose the local httpbin GET endpoint to every test."""
        self.basic_url = f"{httpbin.url}/get"

    def test_get_with_impersonate_list(self):
        """GET succeeds when a list of browsers is supplied."""
        assert Fetcher.get(self.basic_url, impersonate=["chrome", "firefox"]).status == 200

    def test_get_with_single_impersonate(self):
        """GET still succeeds with a single browser string."""
        assert Fetcher.get(self.basic_url, impersonate="chrome").status == 200

    def test_post_with_impersonate_list(self):
        """POST succeeds when a list of browsers is supplied."""
        url = self.basic_url.replace("/get", "/post")
        assert Fetcher.post(url, data={"key": "value"}, impersonate=["chrome", "firefox"]).status == 200

    def test_put_with_impersonate_list(self):
        """PUT succeeds when a list of browsers is supplied."""
        url = self.basic_url.replace("/get", "/put")
        assert Fetcher.put(url, data={"key": "value"}, impersonate=["chrome", "safari"]).status == 200

    def test_delete_with_impersonate_list(self):
        """DELETE succeeds when a list of browsers is supplied."""
        url = self.basic_url.replace("/get", "/delete")
        assert Fetcher.delete(url, impersonate=["chrome", "edge"]).status == 200
@pytest_httpbin.use_class_based_httpbin
class TestFetcherSessionWithImpersonateList:
    """FetcherSession behaviour when `impersonate` is a list of browsers."""

    @pytest.fixture(autouse=True)
    def setup_urls(self, httpbin):
        """Expose the local httpbin GET endpoint to every test."""
        self.basic_url = f"{httpbin.url}/get"

    def test_session_init_with_impersonate_list(self):
        """The session stores the list as its default impersonation setting."""
        browsers = ["chrome", "firefox", "safari"]
        assert FetcherSession(impersonate=browsers)._default_impersonate == browsers

    def test_session_request_with_impersonate_list(self):
        """A request made through the session succeeds with a list."""
        with FetcherSession(impersonate=["chrome", "firefox"]) as session:
            assert session.get(self.basic_url).status == 200

    def test_session_multiple_requests_with_impersonate_list(self):
        """Consecutive requests in one session each succeed."""
        with FetcherSession(impersonate=["chrome110", "chrome120", "chrome131"]) as session:
            for _ in range(2):
                assert session.get(self.basic_url).status == 200

    def test_session_request_level_impersonate_override(self):
        """A per-request browser string overrides the session-level list."""
        with FetcherSession(impersonate=["chrome", "firefox"]) as session:
            assert session.get(self.basic_url, impersonate="safari").status == 200

    def test_session_request_level_impersonate_list_override(self):
        """A per-request browser list overrides the session-level list."""
        with FetcherSession(impersonate=["chrome", "firefox"]) as session:
            assert session.get(self.basic_url, impersonate=["safari", "edge"]).status == 200
class TestImpersonateTypeValidation:
    """The `impersonate` argument accepts a string, a list, or None."""

    def test_impersonate_accepts_string(self):
        """A plain string is stored on the session as-is."""
        assert FetcherSession(impersonate="chrome")._default_impersonate == "chrome"

    def test_impersonate_accepts_list(self):
        """A list of browsers is stored on the session as-is."""
        browsers = ["chrome", "firefox"]
        assert FetcherSession(impersonate=browsers)._default_impersonate == browsers

    def test_impersonate_accepts_none(self):
        """None is stored on the session as-is."""
        assert FetcherSession(impersonate=None)._default_impersonate is None
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_impersonate_list.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:scrapling/fetchers/chrome.py | from scrapling.core._types import Unpack
from scrapling.engines._browsers._types import PlaywrightSession
from scrapling.engines.toolbelt.custom import BaseFetcher, Response
from scrapling.engines._browsers._controllers import DynamicSession, AsyncDynamicSession
class DynamicFetcher(BaseFetcher):
    """A `Fetcher` that provide many options to fetch/load websites' pages through chromium-based browsers."""

    @classmethod
    def _merge_selector_config(cls, kwargs) -> None:
        """Normalize the `selector_config` entry of *kwargs* in place.

        Falls back to the deprecated `custom_config` key for backward compatibility.
        The deprecated key is *popped* (the original used `.get`, which left it in
        `kwargs` and forwarded it to the session constructor — presumably rejected
        there as an unknown keyword; TODO confirm against DynamicSession.__init__).
        """
        legacy_config = kwargs.pop("custom_config", {})  # deprecated alias, must not reach the session
        selector_config = kwargs.get("selector_config", {}) or legacy_config
        if not isinstance(selector_config, dict):
            raise TypeError("Argument `selector_config` must be a dictionary.")
        # Parser defaults first so user-supplied keys win on conflict.
        kwargs["selector_config"] = {**cls._generate_parser_arguments(), **selector_config}

    @classmethod
    def fetch(cls, url: str, **kwargs: Unpack[PlaywrightSession]) -> Response:
        """Opens up a browser and do your request based on your chosen options below.

        :param url: Target url.
        :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
        :param cookies: Set cookies for the next request.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the Response object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param init_script: An absolute path to a JavaScript file to be executed on page creation with this request.
        :param locale: Set the locale for the browser if wanted. Defaults to the system default locale.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
        :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param extra_headers: A dictionary of extra headers to add to the request.
        :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
        :param extra_flags: A list of additional browser flags to pass to the browser on launch.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param additional_args: Additional arguments to be passed to Playwright's context as additional settings.
        :return: A `Response` object.
        """
        cls._merge_selector_config(kwargs)
        with DynamicSession(**kwargs) as session:
            return session.fetch(url)

    @classmethod
    async def async_fetch(cls, url: str, **kwargs: Unpack[PlaywrightSession]) -> Response:
        """Opens up a browser and do your request based on your chosen options below.

        :param url: Target url.
        :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
        :param cookies: Set cookies for the next request.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the Response object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param init_script: An absolute path to a JavaScript file to be executed on page creation with this request.
        :param locale: Set the locale for the browser if wanted. Defaults to the system default locale.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
        :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param extra_headers: A dictionary of extra headers to add to the request.
        :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
        :param extra_flags: A list of additional browser flags to pass to the browser on launch.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param additional_args: Additional arguments to be passed to Playwright's context as additional settings.
        :return: A `Response` object.
        """
        cls._merge_selector_config(kwargs)
        async with AsyncDynamicSession(**kwargs) as session:
            return await session.fetch(url)
PlayWrightFetcher = DynamicFetcher # For backward-compatibility
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/fetchers/chrome.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
D4Vinci/Scrapling:scrapling/fetchers/requests.py | from scrapling.engines.static import (
FetcherSession,
FetcherClient as _FetcherClient,
AsyncFetcherClient as _AsyncFetcherClient,
)
from scrapling.engines.toolbelt.custom import BaseFetcher
# Module-level client singletons: the Fetcher/AsyncFetcher classes below expose
# bound methods of these instances, so all calls share one sync and one async client.
__FetcherClientInstance__ = _FetcherClient()
__AsyncFetcherClientInstance__ = _AsyncFetcherClient()
class Fetcher(BaseFetcher):
    """A basic `Fetcher` class type that can only do basic GET, POST, PUT, and DELETE HTTP requests based on `curl_cffi`."""

    # Bound methods of the module-level sync client, exposed as class attributes
    # so callers can use `Fetcher.get(...)` without instantiating anything.
    get = __FetcherClientInstance__.get
    post = __FetcherClientInstance__.post
    put = __FetcherClientInstance__.put
    delete = __FetcherClientInstance__.delete
class AsyncFetcher(BaseFetcher):
    """A basic `Fetcher` class type that can only do basic GET, POST, PUT, and DELETE HTTP requests based on `curl_cffi`."""

    # Bound coroutine methods of the module-level async client, exposed as class
    # attributes so callers can use `await AsyncFetcher.get(...)` directly.
    get = __AsyncFetcherClientInstance__.get
    post = __AsyncFetcherClientInstance__.post
    put = __AsyncFetcherClientInstance__.put
    delete = __AsyncFetcherClientInstance__.delete
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/fetchers/requests.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/core/utils/_shell.py | from http import cookies as Cookie
from scrapling.core._types import (
List,
Dict,
Tuple,
)
def _CookieParser(cookie_string):
# Errors will be handled on call so the log can be specified
cookie_parser = Cookie.SimpleCookie()
cookie_parser.load(cookie_string)
for key, morsel in cookie_parser.items():
yield key, morsel.value
def _ParseHeaders(header_lines: List[str], parse_cookies: bool = True) -> Tuple[Dict[str, str], Dict[str, str]]:
"""Parses headers into separate header and cookie dictionaries."""
header_dict = dict()
cookie_dict = dict()
for header_line in header_lines:
if ":" not in header_line:
if header_line.endswith(";"):
header_key = header_line[:-1].strip()
header_value = ""
header_dict[header_key] = header_value
else:
raise ValueError(f"Could not parse header without colon: '{header_line}'.")
else:
header_key, header_value = header_line.split(":", 1)
header_key = header_key.strip()
header_value = header_value.strip()
if parse_cookies:
if header_key.lower() == "cookie":
try:
cookie_dict = {key: value for key, value in _CookieParser(header_value)}
except Exception as e: # pragma: no cover
raise ValueError(f"Could not parse cookie string from header '{header_value}': {e}")
else:
header_dict[header_key] = header_value
else:
header_dict[header_key] = header_value
return header_dict, cookie_dict
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/core/utils/_shell.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/engines/_browsers/_base.py | from time import time
from asyncio import sleep as asyncio_sleep, Lock
from contextlib import contextmanager, asynccontextmanager
from playwright.sync_api._generated import Page
from playwright.sync_api import (
Frame,
BrowserContext,
Response as SyncPlaywrightResponse,
)
from playwright.async_api._generated import Page as AsyncPage
from playwright.async_api import (
Frame as AsyncFrame,
Response as AsyncPlaywrightResponse,
BrowserContext as AsyncBrowserContext,
)
from playwright._impl._errors import Error as PlaywrightError
from scrapling.parser import Selector
from scrapling.engines._browsers._page import PageInfo, PagePool
from scrapling.engines._browsers._validators import validate, PlaywrightConfig, StealthConfig
from scrapling.engines._browsers._config_tools import __default_chrome_useragent__, __default_useragent__
from scrapling.engines.toolbelt.navigation import (
construct_proxy_dict,
create_intercept_handler,
create_async_intercept_handler,
)
from scrapling.core._types import (
Any,
Dict,
List,
Set,
Optional,
Callable,
TYPE_CHECKING,
cast,
overload,
Tuple,
ProxyType,
Generator,
AsyncGenerator,
)
from scrapling.engines.constants import STEALTH_ARGS, HARMFUL_ARGS, DEFAULT_ARGS
class SyncSession:
    """Synchronous browser session base: owns the Playwright driver, browser,
    context, and a pool of pages, and provides page acquisition/cleanup helpers.
    """

    # Supplied by the configuring mixin (see BaseSessionMixin.__validate_routine__)
    _config: "PlaywrightConfig | StealthConfig"
    _context_options: Dict[str, Any]

    def _build_context_with_proxy(self, proxy: Optional[ProxyType] = None) -> Dict[str, Any]:
        """Return context options carrying `proxy`; implemented by subclasses."""
        raise NotImplementedError  # pragma: no cover

    def __init__(self, max_pages: int = 1):
        """:param max_pages: Maximum number of concurrently open pages in the pool."""
        self.max_pages = max_pages
        self.page_pool = PagePool(max_pages)
        self._max_wait_for_page = 60  # seconds to wait for a free slot in the pool
        self.playwright: Any = None
        self.context: Any = None
        self.browser: Any = None
        self._is_alive = False

    def start(self) -> None:
        """Start the session; concrete subclasses launch the browser here."""
        pass

    def close(self):  # pragma: no cover
        """Close all resources"""
        if not self._is_alive:
            return

        # Tear down innermost-first: context -> browser -> driver
        if self.context:
            self.context.close()
            self.context = None

        if self.browser:
            self.browser.close()
            self.browser = None

        if self.playwright:
            self.playwright.stop()
            self.playwright = None  # pyright: ignore

        self._is_alive = False

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def _initialize_context(self, config: PlaywrightConfig | StealthConfig, ctx: BrowserContext) -> BrowserContext:
        """Initialize the browser context."""
        if config.init_script:
            ctx.add_init_script(path=config.init_script)
        if config.cookies:  # pragma: no cover
            ctx.add_cookies(config.cookies)
        return ctx

    def _get_page(
        self,
        timeout: int | float,
        extra_headers: Optional[Dict[str, str]],
        disable_resources: bool,
        blocked_domains: Optional[Set[str]] = None,
        context: Optional[BrowserContext] = None,
    ) -> PageInfo[Page]:  # pragma: no cover
        """Get a new page to use

        :param timeout: Default navigation/operation timeout in milliseconds.
        :param extra_headers: Extra HTTP headers to set on the page, if any.
        :param disable_resources: Whether to install the resource-blocking route handler.
        :param blocked_domains: Domains to block via the route handler, if any.
        :param context: Explicit context to open the page in; defaults to the session context.
        """
        # No need to check if a page is available or not in sync code because the code blocked before reaching here till the page closed, ofc.
        ctx = context if context is not None else self.context
        assert ctx is not None, "Browser context not initialized"
        page = ctx.new_page()
        page.set_default_navigation_timeout(timeout)
        page.set_default_timeout(timeout)
        if extra_headers:
            page.set_extra_http_headers(extra_headers)

        if disable_resources or blocked_domains:
            page.route("**/*", create_intercept_handler(disable_resources, blocked_domains))

        page_info = self.page_pool.add_page(page)
        page_info.mark_busy()
        return page_info

    def get_pool_stats(self) -> Dict[str, int]:
        """Get statistics about the current page pool"""
        return {
            "total_pages": self.page_pool.pages_count,
            "busy_pages": self.page_pool.busy_count,
            "max_pages": self.max_pages,
        }

    @staticmethod
    def _wait_for_networkidle(page: Page | Frame, timeout: Optional[int] = None):
        """Wait for the page to become idle (no network activity) even if there are never-ending requests."""
        try:
            page.wait_for_load_state("networkidle", timeout=timeout)
        except Exception:
            # Best-effort wait: pages with never-ending requests will raise a timeout here, which is fine.
            # Note: `Exception` already covers `PlaywrightError`, so no separate clause is needed.
            pass

    def _wait_for_page_stability(self, page: Page | Frame, load_dom: bool, network_idle: bool):
        """Wait for the page's `load` event, then optionally for DOM readiness and network idle."""
        page.wait_for_load_state(state="load")
        if load_dom:
            page.wait_for_load_state(state="domcontentloaded")
        if network_idle:
            self._wait_for_networkidle(page)

    @staticmethod
    def _create_response_handler(page_info: PageInfo[Page], response_container: List) -> Callable:
        """Create a response handler that captures the final navigation response.

        :param page_info: The PageInfo object containing the page
        :param response_container: A list to store the final response (mutable container)
        :return: A callback function for page.on("response", ...)
        """

        def handle_response(finished_response: SyncPlaywrightResponse):
            # Only the main frame's navigation document is the "final" response we want.
            if (
                finished_response.request.resource_type == "document"
                and finished_response.request.is_navigation_request()
                and finished_response.request.frame == page_info.page.main_frame
            ):
                response_container[0] = finished_response

        return handle_response

    @contextmanager
    def _page_generator(
        self,
        timeout: int | float,
        extra_headers: Optional[Dict[str, str]],
        disable_resources: bool,
        proxy: Optional[ProxyType] = None,
        blocked_domains: Optional[Set[str]] = None,
    ) -> Generator["PageInfo[Page]", None, None]:
        """Acquire a page - either from persistent context or fresh context with proxy."""
        if proxy:
            # Rotation mode: create fresh context with the provided proxy
            if not self.browser:  # pragma: no cover
                raise RuntimeError("Browser not initialized for proxy rotation mode")

            context_options = self._build_context_with_proxy(proxy)
            context: BrowserContext = self.browser.new_context(**context_options)
            try:
                context = self._initialize_context(self._config, context)
                page_info = self._get_page(timeout, extra_headers, disable_resources, blocked_domains, context=context)
                yield page_info
            finally:
                # Closing the context also closes every page opened in it.
                context.close()
        else:
            # Standard mode: use PagePool with persistent context
            page_info = self._get_page(timeout, extra_headers, disable_resources, blocked_domains)
            try:
                yield page_info
            finally:
                page_info.page.close()
                self.page_pool.pages.remove(page_info)
class AsyncSession:
    """Asynchronous browser session base: owns the Playwright driver, browser,
    context, and a pool of pages, guarding page creation with an asyncio lock.
    """

    # Supplied by the configuring mixin (see BaseSessionMixin.__validate_routine__)
    _config: "PlaywrightConfig | StealthConfig"
    _context_options: Dict[str, Any]

    def _build_context_with_proxy(self, proxy: Optional[ProxyType] = None) -> Dict[str, Any]:
        """Return context options carrying `proxy`; implemented by subclasses."""
        raise NotImplementedError  # pragma: no cover

    def __init__(self, max_pages: int = 1):
        """:param max_pages: Maximum number of concurrently open pages in the pool."""
        self.max_pages = max_pages
        self.page_pool = PagePool(max_pages)
        self._max_wait_for_page = 60  # seconds to wait for a free slot in the pool
        self.playwright: Any = None
        self.context: Any = None
        self.browser: Any = None
        self._is_alive = False
        self._lock = Lock()  # serializes page creation and the capacity check below

    async def start(self) -> None:
        """Start the session; concrete subclasses launch the browser here."""
        pass

    async def close(self):
        """Close all resources"""
        if not self._is_alive:  # pragma: no cover
            return

        # Tear down innermost-first: context -> browser -> driver
        if self.context:
            await self.context.close()
            self.context = None  # pyright: ignore

        if self.browser:
            await self.browser.close()
            self.browser = None

        if self.playwright:
            await self.playwright.stop()
            self.playwright = None  # pyright: ignore

        self._is_alive = False

    async def __aenter__(self):
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def _initialize_context(
        self, config: PlaywrightConfig | StealthConfig, ctx: AsyncBrowserContext
    ) -> AsyncBrowserContext:
        """Initialize the browser context."""
        if config.init_script:  # pragma: no cover
            await ctx.add_init_script(path=config.init_script)
        if config.cookies:  # pragma: no cover
            await ctx.add_cookies(config.cookies)
        return ctx

    async def _get_page(
        self,
        timeout: int | float,
        extra_headers: Optional[Dict[str, str]],
        disable_resources: bool,
        blocked_domains: Optional[Set[str]] = None,
        context: Optional[AsyncBrowserContext] = None,
    ) -> PageInfo[AsyncPage]:  # pragma: no cover
        """Get a new page to use

        :param timeout: Default navigation/operation timeout in milliseconds.
        :param extra_headers: Extra HTTP headers to set on the page, if any.
        :param disable_resources: Whether to install the resource-blocking route handler.
        :param blocked_domains: Domains to block via the route handler, if any.
        :param context: Explicit context to open the page in; defaults to the session context.
        :raises TimeoutError: If no pool slot frees up within `_max_wait_for_page` seconds.
        """
        ctx = context if context is not None else self.context
        if TYPE_CHECKING:
            assert ctx is not None, "Browser context not initialized"

        async with self._lock:
            # If we're at max capacity after cleanup, wait for busy pages to finish
            if context is None and self.page_pool.pages_count >= self.max_pages:
                # Only applies when using persistent context
                start_time = time()
                while time() - start_time < self._max_wait_for_page:
                    await asyncio_sleep(0.05)
                    if self.page_pool.pages_count < self.max_pages:
                        break
                else:
                    # while/else: the loop ran out of time without breaking
                    raise TimeoutError(
                        f"No pages finished to clear place in the pool within the {self._max_wait_for_page}s timeout period"
                    )

            page = await ctx.new_page()
            page.set_default_navigation_timeout(timeout)
            page.set_default_timeout(timeout)
            if extra_headers:
                await page.set_extra_http_headers(extra_headers)

            if disable_resources or blocked_domains:
                await page.route("**/*", create_async_intercept_handler(disable_resources, blocked_domains))

            return self.page_pool.add_page(page)

    def get_pool_stats(self) -> Dict[str, int]:
        """Get statistics about the current page pool"""
        return {
            "total_pages": self.page_pool.pages_count,
            "busy_pages": self.page_pool.busy_count,
            "max_pages": self.max_pages,
        }

    @staticmethod
    async def _wait_for_networkidle(page: AsyncPage | AsyncFrame, timeout: Optional[int] = None):
        """Wait for the page to become idle (no network activity) even if there are never-ending requests."""
        try:
            await page.wait_for_load_state("networkidle", timeout=timeout)
        except Exception:
            # Best-effort wait: pages with never-ending requests will raise a timeout here, which is fine.
            # Note: `Exception` already covers `PlaywrightError`, so no separate clause is needed.
            pass

    async def _wait_for_page_stability(self, page: AsyncPage | AsyncFrame, load_dom: bool, network_idle: bool):
        """Wait for the page's `load` event, then optionally for DOM readiness and network idle."""
        await page.wait_for_load_state(state="load")
        if load_dom:
            await page.wait_for_load_state(state="domcontentloaded")
        if network_idle:
            await self._wait_for_networkidle(page)

    @staticmethod
    def _create_response_handler(page_info: PageInfo[AsyncPage], response_container: List) -> Callable:
        """Create an async response handler that captures the final navigation response.

        :param page_info: The PageInfo object containing the page
        :param response_container: A list to store the final response (mutable container)
        :return: A callback function for page.on("response", ...)
        """

        async def handle_response(finished_response: AsyncPlaywrightResponse):
            # Only the main frame's navigation document is the "final" response we want.
            if (
                finished_response.request.resource_type == "document"
                and finished_response.request.is_navigation_request()
                and finished_response.request.frame == page_info.page.main_frame
            ):
                response_container[0] = finished_response

        return handle_response

    @asynccontextmanager
    async def _page_generator(
        self,
        timeout: int | float,
        extra_headers: Optional[Dict[str, str]],
        disable_resources: bool,
        proxy: Optional[ProxyType] = None,
        blocked_domains: Optional[Set[str]] = None,
    ) -> AsyncGenerator["PageInfo[AsyncPage]", None]:
        """Acquire a page - either from persistent context or fresh context with proxy."""
        if proxy:
            # Rotation mode: create fresh context with the provided proxy
            if not self.browser:  # pragma: no cover
                raise RuntimeError("Browser not initialized for proxy rotation mode")

            context_options = self._build_context_with_proxy(proxy)
            context: AsyncBrowserContext = await self.browser.new_context(**context_options)
            try:
                context = await self._initialize_context(self._config, context)
                page_info = await self._get_page(
                    timeout, extra_headers, disable_resources, blocked_domains, context=context
                )
                yield page_info
            finally:
                # Closing the context also closes every page opened in it.
                await context.close()
        else:
            # Standard mode: use PagePool with persistent context
            page_info = await self._get_page(timeout, extra_headers, disable_resources, blocked_domains)
            try:
                yield page_info
            finally:
                await page_info.page.close()
                self.page_pool.pages.remove(page_info)
class BaseSessionMixin:
    """Shared configuration plumbing for browser session classes: validates the
    user-supplied parameters and assembles browser/context launch options."""

    _config: "PlaywrightConfig | StealthConfig"

    @overload
    def __validate_routine__(self, params: Dict, model: type[StealthConfig]) -> StealthConfig: ...

    @overload
    def __validate_routine__(self, params: Dict, model: type[PlaywrightConfig]) -> PlaywrightConfig: ...

    def __validate_routine__(
        self, params: Dict, model: type[PlaywrightConfig] | type[StealthConfig]
    ) -> PlaywrightConfig | StealthConfig:
        """Validate `params` against `model` and seed the default option dictionaries."""
        # Dark color scheme bypasses the 'prefersLightColor' check in creepjs
        self._context_options: Dict[str, Any] = {"color_scheme": "dark", "device_scale_factor": 2}
        self._browser_options: Dict[str, Any] = {"args": DEFAULT_ARGS, "ignore_default_args": HARMFUL_ARGS}
        # Internal alias used by session subclasses
        if "__max_pages" in params:
            params["max_pages"] = params.pop("__max_pages")

        validated = validate(params, model=model)
        # Keep a lowercase view of the user-supplied header names for quick lookups
        if validated.extra_headers:
            self._headers_keys = {name.lower() for name in validated.extra_headers.keys()}
        else:
            self._headers_keys = set()
        return validated

    def __generate_options__(self, extra_flags: Tuple | None = None) -> None:
        """Fill the browser/context option dictionaries from the validated config."""
        cfg: PlaywrightConfig | StealthConfig = self._config
        self._context_options["proxy"] = cfg.proxy
        self._context_options["locale"] = cfg.locale
        self._context_options["timezone_id"] = cfg.timezone_id
        self._context_options["extra_http_headers"] = cfg.extra_headers

        # The default useragent in the headful is always correct now in the current versions of Playwright
        if cfg.useragent:
            self._context_options["user_agent"] = cfg.useragent
        elif cfg.headless:
            self._context_options["user_agent"] = (
                __default_chrome_useragent__ if cfg.real_chrome else __default_useragent__
            )

        if cfg.cdp_url:
            # Connecting over CDP: launch options are irrelevant
            self._browser_options = {}
        else:
            launch_flags = self._browser_options["args"]
            extras = cfg.extra_flags or extra_flags
            if extras:
                launch_flags = list(set(launch_flags + extras))
            self._browser_options["args"] = launch_flags
            self._browser_options["headless"] = cfg.headless
            self._browser_options["channel"] = "chrome" if cfg.real_chrome else "chromium"
            self._user_data_dir = cfg.user_data_dir

        if cfg.additional_args:
            self._context_options.update(cfg.additional_args)

    def _build_context_with_proxy(self, proxy: Optional[ProxyType] = None) -> Dict[str, Any]:
        """
        Build context options with a specific proxy for rotation mode.

        :param proxy: Proxy URL string or Playwright-style proxy dict to use for this context.
        :return: Dictionary of context options for browser.new_context().
        """
        options = dict(self._context_options)
        if proxy:
            options["proxy"] = construct_proxy_dict(proxy)
        return options
class DynamicSessionMixin(BaseSessionMixin):
    def __validate__(self, **params):
        """Validate `params` against the plain Playwright config model and build the launch options."""
        self._config = self.__validate_routine__(params, model=PlaywrightConfig)
        self.__generate_options__()
class StealthySessionMixin(BaseSessionMixin):
    """Configuration mixin for stealth sessions: layers fingerprint-hardening
    context options and browser flags on top of BaseSessionMixin."""

    def __validate__(self, **params):
        """Validate `params` against the stealth config model and build hardened launch options."""
        self._config = self.__validate_routine__(params, model=StealthConfig)
        stealth_context = {
            "is_mobile": False,
            "has_touch": False,
            # I'm thinking about disabling it to rest from all Service Workers' headache, but let's keep it as it is for now
            "service_workers": "allow",
            "ignore_https_errors": True,
            "screen": {"width": 1920, "height": 1080},
            "viewport": {"width": 1920, "height": 1080},
            "permissions": ["geolocation", "notifications"],
        }
        self._context_options.update(stealth_context)
        self.__generate_stealth_options()

    def __generate_stealth_options(self) -> None:
        """Assemble the stealth browser flags and delegate to the base option builder."""
        cfg = cast(StealthConfig, self._config)
        extra: Tuple[str, ...] = ()
        if not cfg.cdp_url:
            extra = DEFAULT_ARGS + STEALTH_ARGS
            if cfg.block_webrtc:
                extra = extra + (
                    "--webrtc-ip-handling-policy=disable_non_proxied_udp",
                    "--force-webrtc-ip-handling-policy",  # Ensures the policy is enforced
                )
            if not cfg.allow_webgl:
                extra = extra + (
                    "--disable-webgl",
                    "--disable-webgl-image-chromium",
                    "--disable-webgl2",
                )
            if cfg.hide_canvas:
                extra = extra + ("--fingerprinting-canvas-image-data-noise",)
        super().__generate_options__(extra)

    @staticmethod
    def _detect_cloudflare(page_content: str) -> str | None:
        """
        Detect the type of Cloudflare challenge present in the provided page content.

        Scans the page content for the three predefined challenge markers
        (non-interactive, managed, interactive) and returns the first match.
        When no marker is found, the page is additionally checked for an
        embedded Turnstile captcha script, in which case "embedded" is
        returned. Returns None when nothing matches.

        Args:
            page_content (str): The content of the page to analyze for Cloudflare
                challenge types.

        Returns:
            str: A string representing the detected Cloudflare challenge type, if
                found. Returns None if no challenge matches.
        """
        for challenge_kind in ("non-interactive", "managed", "interactive"):
            if f"cType: '{challenge_kind}'" in page_content:
                return challenge_kind

        # Check if turnstile captcha is embedded inside the page (Usually inside a closed Shadow iframe)
        if Selector(content=page_content).css('script[src*="challenges.cloudflare.com/turnstile/v"]'):
            return "embedded"

        return None
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/_browsers/_base.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 447,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/core/ai.py | from asyncio import gather
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel, Field
from scrapling.core.shell import Convertor
from scrapling.engines.toolbelt.custom import Response as _ScraplingResponse
from scrapling.engines.static import ImpersonateType
from scrapling.fetchers import (
Fetcher,
FetcherSession,
DynamicFetcher,
AsyncDynamicSession,
StealthyFetcher,
AsyncStealthySession,
)
from scrapling.core._types import (
Optional,
Tuple,
Mapping,
Dict,
List,
Any,
Generator,
Sequence,
SetCookieParam,
extraction_types,
SelectorWaitStates,
)
class ResponseModel(BaseModel):
    """Request's response information structure."""

    # HTTP status code returned for the request
    status: int = Field(description="The status code returned by the website.")
    # Extracted page content; one string per extracted element/section
    content: list[str] = Field(description="The content as Markdown/HTML or the text content of the page.")
    # The URL the user requested, echoed back for correlation
    url: str = Field(description="The URL given by the user that resulted in this response.")
def _content_translator(content: Generator[str, None, None], page: _ScraplingResponse) -> ResponseModel:
    """Drain a content generator and wrap it with the page's status/URL in a ResponseModel."""
    extracted = list(content)
    return ResponseModel(status=page.status, content=extracted, url=page.url)
def _normalize_credentials(credentials: Optional[Dict[str, str]]) -> Optional[Tuple[str, str]]:
"""Convert a credentials dictionary to a tuple accepted by fetchers."""
if not credentials:
return None
username = credentials.get("username")
password = credentials.get("password")
if username is None or password is None:
raise ValueError("Credentials dictionary must contain both 'username' and 'password' keys")
return username, password
class ScraplingMCPServer:
@staticmethod
def get(
url: str,
impersonate: ImpersonateType = "chrome",
extraction_type: extraction_types = "markdown",
css_selector: Optional[str] = None,
main_content_only: bool = True,
params: Optional[Dict] = None,
headers: Optional[Mapping[str, Optional[str]]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: Optional[int | float] = 30,
follow_redirects: bool = True,
max_redirects: int = 30,
retries: Optional[int] = 3,
retry_delay: Optional[int] = 1,
proxy: Optional[str] = None,
proxy_auth: Optional[Dict[str, str]] = None,
auth: Optional[Dict[str, str]] = None,
verify: Optional[bool] = True,
http3: Optional[bool] = False,
stealthy_headers: Optional[bool] = True,
) -> ResponseModel:
"""Make GET HTTP request to a URL and return a structured output of the result.
Note: This is only suitable for low-mid protection levels. For high-protection levels or websites that require JS loading, use the other tools directly.
Note: If the `css_selector` resolves to more than one element, all the elements will be returned.
:param url: The URL to request.
:param impersonate: Browser version to impersonate its fingerprint. It's using the latest chrome version by default.
:param extraction_type: The type of content to extract from the page. Defaults to "markdown". Options are:
- Markdown will convert the page content to Markdown format.
- HTML will return the raw HTML content of the page.
- Text will return the text content of the page.
:param css_selector: CSS selector to extract the content from the page. If main_content_only is True, then it will be executed on the main content of the page. Defaults to None.
:param main_content_only: Whether to extract only the main content of the page. Defaults to True. The main content here is the data inside the `<body>` tag.
:param params: Query string parameters for the request.
:param headers: Headers to include in the request.
:param cookies: Cookies to use in the request.
:param timeout: Number of seconds to wait before timing out.
:param follow_redirects: Whether to follow redirects. Defaults to True.
:param max_redirects: Maximum number of redirects. Default 30, use -1 for unlimited.
:param retries: Number of retry attempts. Defaults to 3.
:param retry_delay: Number of seconds to wait between retry attempts. Defaults to 1 second.
:param proxy: Proxy URL to use. Format: "http://username:password@localhost:8030".
Cannot be used together with the `proxies` parameter.
:param proxy_auth: HTTP basic auth for proxy in dictionary format with `username` and `password` keys.
:param auth: HTTP basic auth in dictionary format with `username` and `password` keys.
:param verify: Whether to verify HTTPS certificates.
:param http3: Whether to use HTTP3. Defaults to False. It might be problematic if used it with `impersonate`.
:param stealthy_headers: If enabled (default), it creates and adds real browser headers. It also sets the referer header as if this request came from a Google search of URL's domain.
"""
normalized_proxy_auth = _normalize_credentials(proxy_auth)
normalized_auth = _normalize_credentials(auth)
page = Fetcher.get(
url,
auth=normalized_auth,
proxy=proxy,
http3=http3,
verify=verify,
params=params,
proxy_auth=normalized_proxy_auth,
retry_delay=retry_delay,
stealthy_headers=stealthy_headers,
impersonate=impersonate,
headers=headers,
cookies=cookies,
timeout=timeout,
retries=retries,
max_redirects=max_redirects,
follow_redirects=follow_redirects,
)
return _content_translator(
Convertor._extract_content(
page,
css_selector=css_selector,
extraction_type=extraction_type,
main_content_only=main_content_only,
),
page,
)
@staticmethod
async def bulk_get(
urls: List[str],
impersonate: ImpersonateType = "chrome",
extraction_type: extraction_types = "markdown",
css_selector: Optional[str] = None,
main_content_only: bool = True,
params: Optional[Dict] = None,
headers: Optional[Mapping[str, Optional[str]]] = None,
cookies: Optional[Dict[str, str]] = None,
timeout: Optional[int | float] = 30,
follow_redirects: bool = True,
max_redirects: int = 30,
retries: Optional[int] = 3,
retry_delay: Optional[int] = 1,
proxy: Optional[str] = None,
proxy_auth: Optional[Dict[str, str]] = None,
auth: Optional[Dict[str, str]] = None,
verify: Optional[bool] = True,
http3: Optional[bool] = False,
stealthy_headers: Optional[bool] = True,
) -> List[ResponseModel]:
"""Make GET HTTP request to a group of URLs and for each URL, return a structured output of the result.
Note: This is only suitable for low-mid protection levels. For high-protection levels or websites that require JS loading, use the other tools directly.
Note: If the `css_selector` resolves to more than one element, all the elements will be returned.
:param urls: A list of the URLs to request.
:param impersonate: Browser version to impersonate its fingerprint. It's using the latest chrome version by default.
:param extraction_type: The type of content to extract from the page. Defaults to "markdown". Options are:
- Markdown will convert the page content to Markdown format.
- HTML will return the raw HTML content of the page.
- Text will return the text content of the page.
:param css_selector: CSS selector to extract the content from the page. If main_content_only is True, then it will be executed on the main content of the page. Defaults to None.
:param main_content_only: Whether to extract only the main content of the page. Defaults to True. The main content here is the data inside the `<body>` tag.
:param params: Query string parameters for the request.
:param headers: Headers to include in the request.
:param cookies: Cookies to use in the request.
:param timeout: Number of seconds to wait before timing out.
:param follow_redirects: Whether to follow redirects. Defaults to True.
:param max_redirects: Maximum number of redirects. Default 30, use -1 for unlimited.
:param retries: Number of retry attempts. Defaults to 3.
:param retry_delay: Number of seconds to wait between retry attempts. Defaults to 1 second.
:param proxy: Proxy URL to use. Format: "http://username:password@localhost:8030".
Cannot be used together with the `proxies` parameter.
:param proxy_auth: HTTP basic auth for proxy in dictionary format with `username` and `password` keys.
:param auth: HTTP basic auth in dictionary format with `username` and `password` keys.
:param verify: Whether to verify HTTPS certificates.
:param http3: Whether to use HTTP3. Defaults to False. It might be problematic if used it with `impersonate`.
:param stealthy_headers: If enabled (default), it creates and adds real browser headers. It also sets the referer header as if this request came from a Google search of URL's domain.
"""
normalized_proxy_auth = _normalize_credentials(proxy_auth)
normalized_auth = _normalize_credentials(auth)
async with FetcherSession() as session:
tasks: List[Any] = [
session.get(
url,
auth=normalized_auth,
proxy=proxy,
http3=http3,
verify=verify,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
retries=retries,
proxy_auth=normalized_proxy_auth,
retry_delay=retry_delay,
impersonate=impersonate,
max_redirects=max_redirects,
follow_redirects=follow_redirects,
stealthy_headers=stealthy_headers,
)
for url in urls
]
responses = await gather(*tasks)
return [
_content_translator(
Convertor._extract_content(
page,
css_selector=css_selector,
extraction_type=extraction_type,
main_content_only=main_content_only,
),
page,
)
for page in responses
]
@staticmethod
async def fetch(
url: str,
extraction_type: extraction_types = "markdown",
css_selector: Optional[str] = None,
main_content_only: bool = True,
headless: bool = True, # noqa: F821
google_search: bool = True,
real_chrome: bool = False,
wait: int | float = 0,
proxy: Optional[str | Dict[str, str]] = None,
timezone_id: str | None = None,
locale: str | None = None,
extra_headers: Optional[Dict[str, str]] = None,
useragent: Optional[str] = None,
cdp_url: Optional[str] = None,
timeout: int | float = 30000,
disable_resources: bool = False,
wait_selector: Optional[str] = None,
cookies: Sequence[SetCookieParam] | None = None,
network_idle: bool = False,
wait_selector_state: SelectorWaitStates = "attached",
) -> ResponseModel:
"""Use playwright to open a browser to fetch a URL and return a structured output of the result.
Note: This is only suitable for low-mid protection levels.
Note: If the `css_selector` resolves to more than one element, all the elements will be returned.
:param url: The URL to request.
:param extraction_type: The type of content to extract from the page. Defaults to "markdown". Options are:
- Markdown will convert the page content to Markdown format.
- HTML will return the raw HTML content of the page.
- Text will return the text content of the page.
:param css_selector: CSS selector to extract the content from the page. If main_content_only is True, then it will be executed on the main content of the page. Defaults to None.
:param main_content_only: Whether to extract only the main content of the page. Defaults to True. The main content here is the data inside the `<body>` tag.
:param headless: Run the browser in headless/hidden (default), or headful/visible mode.
:param disable_resources: Drop requests for unnecessary resources for a speed boost.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
:param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
:param cookies: Set cookies for the next request. It should be in a dictionary format that Playwright accepts.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
:param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
:param wait_selector: Wait for a specific CSS selector to be in a specific state.
:param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
:param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
rules. Defaults to the system default locale.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
:param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
"""
page = await DynamicFetcher.async_fetch(
url,
wait=wait,
proxy=proxy,
locale=locale,
timeout=timeout,
cookies=cookies,
cdp_url=cdp_url,
headless=headless,
useragent=useragent,
timezone_id=timezone_id,
real_chrome=real_chrome,
network_idle=network_idle,
wait_selector=wait_selector,
extra_headers=extra_headers,
google_search=google_search,
disable_resources=disable_resources,
wait_selector_state=wait_selector_state,
)
return _content_translator(
Convertor._extract_content(
page,
css_selector=css_selector,
extraction_type=extraction_type,
main_content_only=main_content_only,
),
page,
)
@staticmethod
async def bulk_fetch(
urls: List[str],
extraction_type: extraction_types = "markdown",
css_selector: Optional[str] = None,
main_content_only: bool = True,
headless: bool = True, # noqa: F821
google_search: bool = True,
real_chrome: bool = False,
wait: int | float = 0,
proxy: Optional[str | Dict[str, str]] = None,
timezone_id: str | None = None,
locale: str | None = None,
extra_headers: Optional[Dict[str, str]] = None,
useragent: Optional[str] = None,
cdp_url: Optional[str] = None,
timeout: int | float = 30000,
disable_resources: bool = False,
wait_selector: Optional[str] = None,
cookies: Sequence[SetCookieParam] | None = None,
network_idle: bool = False,
wait_selector_state: SelectorWaitStates = "attached",
) -> List[ResponseModel]:
"""Use playwright to open a browser, then fetch a group of URLs at the same time, and for each page return a structured output of the result.
Note: This is only suitable for low-mid protection levels.
Note: If the `css_selector` resolves to more than one element, all the elements will be returned.
:param urls: A list of the URLs to request.
:param extraction_type: The type of content to extract from the page. Defaults to "markdown". Options are:
- Markdown will convert the page content to Markdown format.
- HTML will return the raw HTML content of the page.
- Text will return the text content of the page.
:param css_selector: CSS selector to extract the content from the page. If main_content_only is True, then it will be executed on the main content of the page. Defaults to None.
:param main_content_only: Whether to extract only the main content of the page. Defaults to True. The main content here is the data inside the `<body>` tag.
:param headless: Run the browser in headless/hidden (default), or headful/visible mode.
:param disable_resources: Drop requests for unnecessary resources for a speed boost.
Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
:param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
:param cookies: Set cookies for the next request. It should be in a dictionary format that Playwright accepts.
:param network_idle: Wait for the page until there are no network connections for at least 500 ms.
:param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
:param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
:param wait_selector: Wait for a specific CSS selector to be in a specific state.
:param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
:param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
rules. Defaults to the system default locale.
:param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
:param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
:param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
:param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
:param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
:param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
"""
async with AsyncDynamicSession(
wait=wait,
proxy=proxy,
locale=locale,
timeout=timeout,
cookies=cookies,
cdp_url=cdp_url,
headless=headless,
max_pages=len(urls),
useragent=useragent,
timezone_id=timezone_id,
real_chrome=real_chrome,
network_idle=network_idle,
wait_selector=wait_selector,
google_search=google_search,
extra_headers=extra_headers,
disable_resources=disable_resources,
wait_selector_state=wait_selector_state,
) as session:
tasks = [session.fetch(url) for url in urls]
responses = await gather(*tasks)
return [
_content_translator(
Convertor._extract_content(
page,
css_selector=css_selector,
extraction_type=extraction_type,
main_content_only=main_content_only,
),
page,
)
for page in responses
]
@staticmethod
async def stealthy_fetch(
    url: str,
    extraction_type: extraction_types = "markdown",
    css_selector: Optional[str] = None,
    main_content_only: bool = True,
    headless: bool = True,  # noqa: F821
    google_search: bool = True,
    real_chrome: bool = False,
    wait: int | float = 0,
    proxy: Optional[str | Dict[str, str]] = None,
    timezone_id: str | None = None,
    locale: str | None = None,
    extra_headers: Optional[Dict[str, str]] = None,
    useragent: Optional[str] = None,
    hide_canvas: bool = False,
    cdp_url: Optional[str] = None,
    timeout: int | float = 30000,
    disable_resources: bool = False,
    wait_selector: Optional[str] = None,
    cookies: Sequence[SetCookieParam] | None = None,
    network_idle: bool = False,
    wait_selector_state: SelectorWaitStates = "attached",
    block_webrtc: bool = False,
    allow_webgl: bool = True,
    solve_cloudflare: bool = False,
    additional_args: Optional[Dict] = None,
) -> ResponseModel:
    """Use the stealthy fetcher to fetch a URL and return a structured output of the result.
    Note: This is the only suitable fetcher for high protection levels.
    Note: If the `css_selector` resolves to more than one element, all the elements will be returned.
    :param url: The URL to request.
    :param extraction_type: The type of content to extract from the page. Defaults to "markdown". Options are:
        - Markdown will convert the page content to Markdown format.
        - HTML will return the raw HTML content of the page.
        - Text will return the text content of the page.
    :param css_selector: CSS selector to extract the content from the page. If main_content_only is True, then it will be executed on the main content of the page. Defaults to None.
    :param main_content_only: Whether to extract only the main content of the page. Defaults to True. The main content here is the data inside the `<body>` tag.
    :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
    :param disable_resources: Drop requests for unnecessary resources for a speed boost.
        Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
    :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
    :param cookies: Set cookies for the next request.
    :param solve_cloudflare: Solves all types of the Cloudflare's Turnstile/Interstitial challenges before returning the response to you.
    :param allow_webgl: Enabled by default. Disabling WebGL is not recommended as many WAFs now check if WebGL is enabled.
    :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
    :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
    :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
    :param wait_selector: Wait for a specific CSS selector to be in a specific state.
    :param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
    :param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
        rules. Defaults to the system default locale.
    :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
    :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
    :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
    :param block_webrtc: Forces WebRTC to respect proxy settings to prevent local IP address leak.
    :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
    :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
    :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
    :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
    :param additional_args: Additional arguments to be passed to Playwright's context as additional settings, and it takes higher priority than Scrapling's settings.
    """
    # NOTE: the docstring above doubles as the MCP tool description (it is
    # passed as `description=` in `serve`), so its wording is runtime-visible.
    # Forward every browser option to the one-shot stealthy fetcher.
    fetch_options = dict(
        wait=wait,
        proxy=proxy,
        locale=locale,
        cdp_url=cdp_url,
        timeout=timeout,
        cookies=cookies,
        headless=headless,
        useragent=useragent,
        timezone_id=timezone_id,
        real_chrome=real_chrome,
        hide_canvas=hide_canvas,
        allow_webgl=allow_webgl,
        network_idle=network_idle,
        block_webrtc=block_webrtc,
        wait_selector=wait_selector,
        google_search=google_search,
        extra_headers=extra_headers,
        additional_args=additional_args,
        solve_cloudflare=solve_cloudflare,
        disable_resources=disable_resources,
        wait_selector_state=wait_selector_state,
    )
    page = await StealthyFetcher.async_fetch(url, **fetch_options)
    # Reduce the fetched page to the requested representation, then wrap it
    # in the structured response model.
    extracted = Convertor._extract_content(
        page,
        css_selector=css_selector,
        extraction_type=extraction_type,
        main_content_only=main_content_only,
    )
    return _content_translator(extracted, page)
@staticmethod
async def bulk_stealthy_fetch(
    urls: List[str],
    extraction_type: extraction_types = "markdown",
    css_selector: Optional[str] = None,
    main_content_only: bool = True,
    headless: bool = True,  # noqa: F821
    google_search: bool = True,
    real_chrome: bool = False,
    wait: int | float = 0,
    proxy: Optional[str | Dict[str, str]] = None,
    timezone_id: str | None = None,
    locale: str | None = None,
    extra_headers: Optional[Dict[str, str]] = None,
    useragent: Optional[str] = None,
    hide_canvas: bool = False,
    cdp_url: Optional[str] = None,
    timeout: int | float = 30000,
    disable_resources: bool = False,
    wait_selector: Optional[str] = None,
    cookies: Sequence[SetCookieParam] | None = None,
    network_idle: bool = False,
    wait_selector_state: SelectorWaitStates = "attached",
    block_webrtc: bool = False,
    allow_webgl: bool = True,
    solve_cloudflare: bool = False,
    additional_args: Optional[Dict] = None,
) -> List[ResponseModel]:
    """Use the stealthy fetcher to fetch a group of URLs at the same time, and for each page return a structured output of the result.
    Note: This is the only suitable fetcher for high protection levels.
    Note: If the `css_selector` resolves to more than one element, all the elements will be returned.
    :param urls: A list of the URLs to request.
    :param extraction_type: The type of content to extract from the page. Defaults to "markdown". Options are:
        - Markdown will convert the page content to Markdown format.
        - HTML will return the raw HTML content of the page.
        - Text will return the text content of the page.
    :param css_selector: CSS selector to extract the content from the page. If main_content_only is True, then it will be executed on the main content of the page. Defaults to None.
    :param main_content_only: Whether to extract only the main content of the page. Defaults to True. The main content here is the data inside the `<body>` tag.
    :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
    :param disable_resources: Drop requests for unnecessary resources for a speed boost.
        Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
    :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
    :param cookies: Set cookies for the next request.
    :param solve_cloudflare: Solves all types of the Cloudflare's Turnstile/Interstitial challenges before returning the response to you.
    :param allow_webgl: Enabled by default. Disabling WebGL is not recommended as many WAFs now check if WebGL is enabled.
    :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
    :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
    :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
    :param wait_selector: Wait for a specific CSS selector to be in a specific state.
    :param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
    :param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
        rules. Defaults to the system default locale.
    :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
    :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
    :param hide_canvas: Add random noise to canvas operations to prevent fingerprinting.
    :param block_webrtc: Forces WebRTC to respect proxy settings to prevent local IP address leak.
    :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
    :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
    :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
    :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
    :param additional_args: Additional arguments to be passed to Playwright's context as additional settings, and it takes higher priority than Scrapling's settings.
    """
    # NOTE: the docstring above doubles as the MCP tool description (it is
    # passed as `description=` in `serve`), so its wording is runtime-visible.
    # One shared stealthy session serves all URLs concurrently.
    session_options = dict(
        wait=wait,
        proxy=proxy,
        locale=locale,
        cdp_url=cdp_url,
        timeout=timeout,
        cookies=cookies,
        headless=headless,
        useragent=useragent,
        timezone_id=timezone_id,
        real_chrome=real_chrome,
        hide_canvas=hide_canvas,
        allow_webgl=allow_webgl,
        network_idle=network_idle,
        block_webrtc=block_webrtc,
        wait_selector=wait_selector,
        google_search=google_search,
        extra_headers=extra_headers,
        additional_args=additional_args,
        solve_cloudflare=solve_cloudflare,
        disable_resources=disable_resources,
        wait_selector_state=wait_selector_state,
    )
    async with AsyncStealthySession(**session_options) as session:
        responses = await gather(*(session.fetch(target) for target in urls))
        # Convert each fetched page into the structured response model while
        # the session is still open.
        results = []
        for page in responses:
            extracted = Convertor._extract_content(
                page,
                css_selector=css_selector,
                extraction_type=extraction_type,
                main_content_only=main_content_only,
            )
            results.append(_content_translator(extracted, page))
        return results
def serve(self, http: bool, host: str, port: int):
    """Serve the MCP server."""
    server = FastMCP(name="Scrapling", host=host, port=port)
    # Register every fetching method as an MCP tool; each method's docstring
    # becomes the tool's description.
    tool_specs = (
        (self.get, "get"),
        (self.bulk_get, "bulk_get"),
        (self.fetch, "fetch"),
        (self.bulk_fetch, "bulk_fetch"),
        (self.stealthy_fetch, "stealthy_fetch"),
        (self.bulk_stealthy_fetch, "bulk_stealthy_fetch"),
    )
    for handler, title in tool_specs:
        server.add_tool(handler, title=title, description=handler.__doc__, structured_output=True)
    server.run(transport="streamable-http" if http else "stdio")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/core/ai.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 587,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/engines/_browsers/_controllers.py | from time import sleep as time_sleep
from asyncio import sleep as asyncio_sleep
from playwright.sync_api import (
Locator,
sync_playwright,
)
from playwright.async_api import (
async_playwright,
Locator as AsyncLocator,
)
from scrapling.core.utils import log
from scrapling.core._types import Optional, ProxyType, Unpack
from scrapling.engines.toolbelt.proxy_rotation import is_proxy_error
from scrapling.engines.toolbelt.convertor import Response, ResponseFactory
from scrapling.engines.toolbelt.fingerprints import generate_convincing_referer
from scrapling.engines._browsers._types import PlaywrightSession, PlaywrightFetchParams
from scrapling.engines._browsers._base import SyncSession, AsyncSession, DynamicSessionMixin
from scrapling.engines._browsers._validators import validate_fetch as _validate, PlaywrightConfig
class DynamicSession(SyncSession, DynamicSessionMixin):
    """A Browser session manager with page pooling."""

    # Fixed attribute set: parsed configuration, the launch/context option
    # dicts built during validation, and the live Playwright handles.
    __slots__ = (
        "_config",
        "_context_options",
        "_browser_options",
        "_user_data_dir",
        "_headers_keys",
        "max_pages",
        "page_pool",
        "_max_wait_for_page",
        "playwright",
        "context",
    )

    def __init__(self, **kwargs: Unpack[PlaywrightSession]):
        """A Browser session manager with page pooling, it's using a persistent browser Context by default with a temporary user profile directory.
        :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
            Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
        :param cookies: Set cookies for the next request.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param init_script: An absolute path to a JavaScript file to be executed on page creation for all pages in this session.
        :param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
            rules. Defaults to the system default locale.
        :param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
        :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
        :param user_data_dir: Path to a User Data Directory, which stores browser session data like cookies and local storage. The default is to create a temporary directory.
        :param extra_flags: A list of additional browser flags to pass to the browser on launch.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param additional_args: Additional arguments to be passed to Playwright's context as additional settings, and it takes higher priority than Scrapling's settings.
        """
        # __validate__ comes from DynamicSessionMixin; presumably it parses the
        # kwargs into `_config` and the option dicts used by `start()` -- TODO
        # confirm against the mixin.
        self.__validate__(**kwargs)
        super().__init__()

    def start(self):
        """Create a browser for this instance and context."""
        if not self.playwright:
            self.playwright = sync_playwright().start()
            try:
                if self._config.cdp_url:  # pragma: no cover
                    # Attach to an externally running browser over CDP instead
                    # of launching one ourselves.
                    self.browser = self.playwright.chromium.connect_over_cdp(endpoint_url=self._config.cdp_url)
                    if not self._config.proxy_rotator and self.browser:
                        self.context = self.browser.new_context(**self._context_options)
                elif self._config.proxy_rotator:
                    # With a proxy rotator, launch a bare browser only; no
                    # context is created here (see `fetch()` for per-request
                    # proxy handling).
                    self.browser = self.playwright.chromium.launch(**self._browser_options)
                else:
                    # Default path: a persistent context backed by a user-data
                    # directory (a temporary one unless the caller set it).
                    persistent_options = (
                        self._browser_options | self._context_options | {"user_data_dir": self._user_data_dir}
                    )
                    self.context = self.playwright.chromium.launch_persistent_context(**persistent_options)
                if self.context:
                    # Session-level context setup from the mixin (cookies/init
                    # scripts, presumably -- verify against _initialize_context).
                    self.context = self._initialize_context(self._config, self.context)
                self._is_alive = True
            except Exception:
                # Clean up playwright if browser setup fails
                self.playwright.stop()
                self.playwright = None
                raise
        else:
            raise RuntimeError("Session has been already started")

    def fetch(self, url: str, **kwargs: Unpack[PlaywrightFetchParams]) -> Response:
        """Opens up the browser and do your request based on your chosen options.
        :param url: The Target url.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
            Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param proxy: Static proxy to override rotator and session proxy. A new browser context will be created and used with it.
        :return: A `Response` object.
        """
        # A per-request `proxy` overrides the rotator, so pop it before the
        # remaining kwargs are validated into `params`.
        static_proxy = kwargs.pop("proxy", None)
        params = _validate(kwargs, self, PlaywrightConfig)
        if not self._is_alive:  # pragma: no cover
            raise RuntimeError("Context manager has been closed")
        # Only fake a Google-search referer when the caller didn't already set
        # a referer header themselves (header names compared case-insensitively).
        request_headers_keys = {h.lower() for h in params.extra_headers.keys()} if params.extra_headers else set()
        referer = (
            generate_convincing_referer(url)
            if (params.google_search and "referer" not in request_headers_keys)
            else None
        )
        for attempt in range(self._config.retries):
            # Rotate proxies per attempt unless a static proxy was supplied.
            proxy: Optional[ProxyType] = None
            if self._config.proxy_rotator and static_proxy is None:
                proxy = self._config.proxy_rotator.get_proxy()
            else:
                proxy = static_proxy
            with self._page_generator(
                params.timeout, params.extra_headers, params.disable_resources, proxy, params.blocked_domains
            ) as page_info:
                # Single-element list used as a mutable cell so the response
                # handler closure can record the last matching response.
                final_response = [None]
                page = page_info.page
                page.on("response", self._create_response_handler(page_info, final_response))
                try:
                    first_response = page.goto(url, referer=referer)
                    self._wait_for_page_stability(page, params.load_dom, params.network_idle)
                    if not first_response:
                        raise RuntimeError(f"Failed to get response for {url}")
                    if params.page_action:
                        # User automation hook; its failures are logged, not fatal.
                        try:
                            _ = params.page_action(page)
                        except Exception as e:  # pragma: no cover
                            log.error(f"Error executing page_action: {e}")
                    if params.wait_selector:
                        # Best-effort wait: a missing selector logs an error but
                        # still returns whatever the page produced.
                        try:
                            waiter: Locator = page.locator(params.wait_selector)
                            waiter.first.wait_for(state=params.wait_selector_state)
                            self._wait_for_page_stability(page, params.load_dom, params.network_idle)
                        except Exception as e:  # pragma: no cover
                            log.error(f"Error waiting for selector {params.wait_selector}: {e}")
                    # Extra settle time requested by the caller (milliseconds).
                    page.wait_for_timeout(params.wait)
                    response = ResponseFactory.from_playwright_response(
                        page, first_response, final_response[0], params.selector_config, meta={"proxy": proxy}
                    )
                    return response
                except Exception as e:
                    # Mark the page bad so the pool won't reuse it, then retry
                    # (with a delay) or re-raise on the final attempt.
                    page_info.mark_error()
                    if attempt < self._config.retries - 1:
                        if is_proxy_error(e):
                            log.warning(
                                f"Proxy '{proxy}' failed (attempt {attempt + 1}) | Retrying in {self._config.retry_delay}s..."
                            )
                        else:
                            log.warning(
                                f"Attempt {attempt + 1} failed: {e}. Retrying in {self._config.retry_delay}s..."
                            )
                        time_sleep(self._config.retry_delay)
                    else:
                        log.error(f"Failed after {self._config.retries} attempts: {e}")
                        raise
        raise RuntimeError("Request failed")  # pragma: no cover
class AsyncDynamicSession(AsyncSession, DynamicSessionMixin):
    """An async Browser session manager with page pooling, it's using a persistent browser Context by default with a temporary user profile directory."""

    # Fixed attribute set; pooling attributes live on the AsyncSession base.
    __slots__ = (
        "_config",
        "_context_options",
        "_browser_options",
        "_user_data_dir",
        "_headers_keys",
    )

    def __init__(self, **kwargs: Unpack[PlaywrightSession]):
        """A Browser session manager with page pooling
        :param headless: Run the browser in headless/hidden (default), or headful/visible mode.
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
            Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param useragent: Pass a useragent string to be used. Otherwise the fetcher will generate a real Useragent of the same browser and use it.
        :param cookies: Set cookies for the next request.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param init_script: An absolute path to a JavaScript file to be executed on page creation for all pages in this session.
        :param locale: Specify user locale, for example, `en-GB`, `de-DE`, etc. Locale will affect navigator.language value, Accept-Language request header value as well as number and date formatting
            rules. Defaults to the system default locale.
        :param timezone_id: Changes the timezone of the browser. Defaults to the system timezone.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param real_chrome: If you have a Chrome browser installed on your device, enable this, and the Fetcher will launch an instance of your browser and use it.
        :param cdp_url: Instead of launching a new browser instance, connect to this CDP URL to control real browsers through CDP.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
        :param proxy: The proxy to be used with requests, it can be a string or a dictionary with the keys 'server', 'username', and 'password' only.
        :param max_pages: The maximum number of tabs to be opened at the same time. It will be used in rotation through a PagePool.
        :param user_data_dir: Path to a User Data Directory, which stores browser session data like cookies and local storage. The default is to create a temporary directory.
        :param extra_flags: A list of additional browser flags to pass to the browser on launch.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param additional_args: Additional arguments to be passed to Playwright's context as additional settings, and it takes higher priority than Scrapling's settings.
        """
        # Validation (from the mixin) must run first: the base class is seeded
        # with the parsed `max_pages` value from `_config`.
        self.__validate__(**kwargs)
        super().__init__(max_pages=self._config.max_pages)

    async def start(self) -> None:
        """Create a browser for this instance and context."""
        if not self.playwright:
            self.playwright = await async_playwright().start()
            try:
                if self._config.cdp_url:
                    # Attach to an externally running browser over CDP instead
                    # of launching one ourselves.
                    self.browser = await self.playwright.chromium.connect_over_cdp(endpoint_url=self._config.cdp_url)
                    if not self._config.proxy_rotator and self.browser:
                        self.context = await self.browser.new_context(**self._context_options)
                elif self._config.proxy_rotator:
                    # With a proxy rotator, launch a bare browser only; no
                    # context is created here (see `fetch()` for per-request
                    # proxy handling).
                    self.browser = await self.playwright.chromium.launch(**self._browser_options)
                else:
                    # Default path: a persistent context backed by a user-data
                    # directory (a temporary one unless the caller set it).
                    persistent_options = (
                        self._browser_options | self._context_options | {"user_data_dir": self._user_data_dir}
                    )
                    self.context = await self.playwright.chromium.launch_persistent_context(**persistent_options)
                if self.context:
                    # Session-level context setup from the mixin (cookies/init
                    # scripts, presumably -- verify against _initialize_context).
                    self.context = await self._initialize_context(self._config, self.context)
                self._is_alive = True
            except Exception:
                # Clean up playwright if browser setup fails
                await self.playwright.stop()
                self.playwright = None
                raise
        else:
            raise RuntimeError("Session has been already started")

    async def fetch(self, url: str, **kwargs: Unpack[PlaywrightFetchParams]) -> Response:
        """Opens up the browser and do your request based on your chosen options.
        :param url: The Target url.
        :param google_search: Enabled by default, Scrapling will set the referer header to be as if this request came from a Google search of this website's domain name.
        :param timeout: The timeout in milliseconds that is used in all operations and waits through the page. The default is 30,000
        :param wait: The time (milliseconds) the fetcher will wait after everything finishes before closing the page and returning the ` Response ` object.
        :param page_action: Added for automation. A function that takes the `page` object and does the automation you need.
        :param extra_headers: A dictionary of extra headers to add to the request. _The referer set by the `google_search` argument takes priority over the referer set here if used together._
        :param disable_resources: Drop requests for unnecessary resources for a speed boost.
            Requests dropped are of type `font`, `image`, `media`, `beacon`, `object`, `imageset`, `texttrack`, `websocket`, `csp_report`, and `stylesheet`.
        :param blocked_domains: A set of domain names to block requests to. Subdomains are also matched (e.g., ``"example.com"`` blocks ``"sub.example.com"`` too).
        :param wait_selector: Wait for a specific CSS selector to be in a specific state.
        :param wait_selector_state: The state to wait for the selector given with `wait_selector`. The default state is `attached`.
        :param network_idle: Wait for the page until there are no network connections for at least 500 ms.
        :param load_dom: Enabled by default, wait for all JavaScript on page(s) to fully load and execute.
        :param selector_config: The arguments that will be passed in the end while creating the final Selector's class.
        :param proxy: Static proxy to override rotator and session proxy. A new browser context will be created and used with it.
        :return: A `Response` object.
        """
        # A per-request `proxy` overrides the rotator, so pop it before the
        # remaining kwargs are validated into `params`.
        static_proxy = kwargs.pop("proxy", None)
        params = _validate(kwargs, self, PlaywrightConfig)
        if not self._is_alive:  # pragma: no cover
            raise RuntimeError("Context manager has been closed")
        # Only fake a Google-search referer when the caller didn't already set
        # a referer header themselves (header names compared case-insensitively).
        request_headers_keys = {h.lower() for h in params.extra_headers.keys()} if params.extra_headers else set()
        referer = (
            generate_convincing_referer(url)
            if (params.google_search and "referer" not in request_headers_keys)
            else None
        )
        for attempt in range(self._config.retries):
            # Rotate proxies per attempt unless a static proxy was supplied.
            proxy: Optional[ProxyType] = None
            if self._config.proxy_rotator and static_proxy is None:
                proxy = self._config.proxy_rotator.get_proxy()
            else:
                proxy = static_proxy
            async with self._page_generator(
                params.timeout, params.extra_headers, params.disable_resources, proxy, params.blocked_domains
            ) as page_info:
                # Single-element list used as a mutable cell so the response
                # handler closure can record the last matching response.
                final_response = [None]
                page = page_info.page
                page.on("response", self._create_response_handler(page_info, final_response))
                try:
                    first_response = await page.goto(url, referer=referer)
                    await self._wait_for_page_stability(page, params.load_dom, params.network_idle)
                    if not first_response:
                        raise RuntimeError(f"Failed to get response for {url}")
                    if params.page_action:
                        # User automation hook; its failures are logged, not fatal.
                        try:
                            _ = await params.page_action(page)
                        except Exception as e:  # pragma: no cover
                            log.error(f"Error executing page_action: {e}")
                    if params.wait_selector:
                        # Best-effort wait: a missing selector logs an error but
                        # still returns whatever the page produced.
                        try:
                            waiter: AsyncLocator = page.locator(params.wait_selector)
                            await waiter.first.wait_for(state=params.wait_selector_state)
                            await self._wait_for_page_stability(page, params.load_dom, params.network_idle)
                        except Exception as e:  # pragma: no cover
                            log.error(f"Error waiting for selector {params.wait_selector}: {e}")
                    # Extra settle time requested by the caller (milliseconds).
                    await page.wait_for_timeout(params.wait)
                    response = await ResponseFactory.from_async_playwright_response(
                        page, first_response, final_response[0], params.selector_config, meta={"proxy": proxy}
                    )
                    return response
                except Exception as e:
                    # Mark the page bad so the pool won't reuse it, then retry
                    # (with a delay) or re-raise on the final attempt.
                    page_info.mark_error()
                    if attempt < self._config.retries - 1:
                        if is_proxy_error(e):
                            log.warning(
                                f"Proxy '{proxy}' failed (attempt {attempt + 1}) | Retrying in {self._config.retry_delay}s..."
                            )
                        else:
                            log.warning(
                                f"Attempt {attempt + 1} failed: {e}. Retrying in {self._config.retry_delay}s..."
                            )
                        await asyncio_sleep(self._config.retry_delay)
                    else:
                        log.error(f"Failed after {self._config.retries} attempts: {e}")
                        raise
        raise RuntimeError("Request failed")  # pragma: no cover
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/_browsers/_controllers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/engines/_browsers/_page.py | from threading import RLock
from dataclasses import dataclass
from playwright.sync_api._generated import Page as SyncPage
from playwright.async_api._generated import Page as AsyncPage
from scrapling.core._types import Optional, List, Literal, overload, TypeVar, Generic, cast
PageState = Literal["ready", "busy", "error"]  # States that a page can be in
# Constrained TypeVar: a PageInfo is parameterized by either Playwright's sync or async Page
PageType = TypeVar("PageType", SyncPage, AsyncPage)
@dataclass
class PageInfo(Generic[PageType]):
    """Information about a single browser page/tab and its current state."""

    __slots__ = ("page", "state", "url")
    page: PageType  # the underlying Playwright page object (sync or async)
    state: PageState  # one of "ready", "busy", or "error"
    url: Optional[str]  # URL currently being processed, if any

    def mark_busy(self, url: str = ""):
        """Mark the page as busy with the given URL."""
        self.state = "busy"
        self.url = url

    def mark_error(self):
        """Mark the page as having an error"""
        self.state = "error"

    def __repr__(self):
        # Fix: `!r` already wraps the value in quotes; the previous f-string added a
        # second, literal pair of quotes around the URL (e.g. Page(URL="'http://x'")).
        return f"Page(URL={self.url!r}, state={self.state!r})"

    def __eq__(self, other_page):
        """Comparing this page to another page object."""
        if other_page.__class__ is not self.__class__:
            return NotImplemented
        return self.page == other_page.page
class PagePool:
    """Manages a pool of browser pages/tabs with state tracking"""

    __slots__ = ("max_pages", "pages", "_lock")

    def __init__(self, max_pages: int = 5):
        self.max_pages = max_pages
        self.pages: List[PageInfo[SyncPage] | PageInfo[AsyncPage]] = []
        self._lock = RLock()

    @overload
    def add_page(self, page: SyncPage) -> PageInfo[SyncPage]: ...

    @overload
    def add_page(self, page: AsyncPage) -> PageInfo[AsyncPage]: ...

    def add_page(self, page: SyncPage | AsyncPage) -> PageInfo[SyncPage] | PageInfo[AsyncPage]:
        """Register a new page with the pool and return its tracking record."""
        with self._lock:
            if len(self.pages) >= self.max_pages:
                raise RuntimeError(f"Maximum page limit ({self.max_pages}) reached")

            # `cast` is a no-op at runtime; the branch only narrows the static type
            fresh_entry = PageInfo(page, "ready", "")
            if isinstance(page, AsyncPage):
                record: PageInfo[SyncPage] | PageInfo[AsyncPage] = cast(PageInfo[AsyncPage], fresh_entry)
            else:
                record = cast(PageInfo[SyncPage], fresh_entry)
            self.pages.append(record)
            return record

    @property
    def pages_count(self) -> int:
        """Get the total number of pages"""
        return len(self.pages)

    @property
    def busy_count(self) -> int:
        """Get the number of busy pages"""
        with self._lock:
            busy = [entry for entry in self.pages if entry.state == "busy"]
            return len(busy)

    def cleanup_error_pages(self):
        """Remove pages in error state"""
        with self._lock:
            self.pages = [entry for entry in self.pages if entry.state != "error"]
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/_browsers/_page.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
D4Vinci/Scrapling:scrapling/engines/_browsers/_validators.py | from pathlib import Path
from typing import Annotated
from functools import lru_cache
from urllib.parse import urlparse
from dataclasses import dataclass, fields
from msgspec import Struct, Meta, convert, ValidationError
from scrapling.core._types import (
Any,
Dict,
List,
Set,
Tuple,
Optional,
Callable,
Sequence,
overload,
SetCookieParam,
SelectorWaitStates,
)
from scrapling.engines.toolbelt.proxy_rotation import ProxyRotator
from scrapling.engines.toolbelt.navigation import construct_proxy_dict
from scrapling.engines._browsers._types import PlaywrightFetchParams, StealthFetchParams
# Custom validators for msgspec
@lru_cache(8)
def _is_invalid_file_path(value: str) -> bool | str: # pragma: no cover
"""Fast file path validation"""
path = Path(value)
if not path.exists():
return f"Init script path not found: {value}"
if not path.is_file():
return f"Init script is not a file: {value}"
if not path.is_absolute():
return f"Init script is not a absolute path: {value}"
return False
@lru_cache(2)
def _is_invalid_cdp_url(cdp_url: str) -> bool | str:
"""Fast CDP URL validation"""
if not cdp_url.startswith(("ws://", "wss://")):
return "CDP URL must use 'ws://' or 'wss://' scheme"
netloc = urlparse(cdp_url).netloc
if not netloc: # pragma: no cover
return "Invalid hostname for the CDP URL"
return False
# Type aliases for cleaner annotations
PagesCount = Annotated[int, Meta(ge=1, le=50)]  # pool size: 1..50 pages
RetriesCount = Annotated[int, Meta(ge=1, le=10)]  # retry attempts: 1..10
# NOTE(review): in Annotated[int, float, Meta(ge=0)] the `float` is *metadata*, not
# part of the type — if float values were meant to validate, `int | float` was
# probably intended. Confirm before changing, since it widens accepted inputs.
Seconds = Annotated[int, float, Meta(ge=0)]
class PlaywrightConfig(Struct, kw_only=True, frozen=False, weakref=True):
    """Configuration struct for validation of Playwright session/fetch options.

    Fields are validated by msgspec on conversion; `__post_init__` then applies
    the cross-field checks and normalizations that msgspec cannot express.
    """

    max_pages: PagesCount = 1
    headless: bool = True
    disable_resources: bool = False
    network_idle: bool = False
    load_dom: bool = True
    wait_selector: Optional[str] = None
    wait_selector_state: SelectorWaitStates = "attached"
    cookies: Sequence[SetCookieParam] | None = []
    google_search: bool = True
    wait: Seconds = 0
    timezone_id: str | None = ""
    page_action: Optional[Callable] = None
    proxy: Optional[str | Dict[str, str] | Tuple] = None  # The default value for proxy in Playwright's source is `None`
    proxy_rotator: Optional[ProxyRotator] = None
    extra_headers: Optional[Dict[str, str]] = None
    # presumably milliseconds (default 30000; StealthConfig raises it to 60_000
    # for Cloudflare solving) — confirm against the Playwright timeout units
    timeout: Seconds = 30000
    init_script: Optional[str] = None
    user_data_dir: str = ""
    selector_config: Optional[Dict] = {}
    additional_args: Optional[Dict] = {}
    locale: str | None = None
    real_chrome: bool = False
    cdp_url: Optional[str] = None
    useragent: Optional[str] = None
    extra_flags: Optional[List[str]] = None
    blocked_domains: Optional[Set[str]] = None
    retries: RetriesCount = 3
    retry_delay: Seconds = 1

    def __post_init__(self):  # pragma: no cover
        """Custom validation after msgspec validation.

        Raises TypeError for a non-callable ``page_action`` and ValueError for a
        proxy/proxy_rotator conflict, an invalid CDP URL, or a bad init-script path.
        """
        if self.page_action and not callable(self.page_action):
            raise TypeError(f"page_action must be callable, got {type(self.page_action).__name__}")

        if self.proxy and self.proxy_rotator:
            raise ValueError(
                "Cannot use 'proxy_rotator' together with 'proxy'. "
                "Use either a static proxy or proxy rotation, not both."
            )

        if self.proxy:
            # Normalize the accepted proxy formats (str/dict/tuple) into one dict shape
            self.proxy = construct_proxy_dict(self.proxy)

        if self.cdp_url:
            cdp_msg = _is_invalid_cdp_url(self.cdp_url)
            if cdp_msg:
                raise ValueError(cdp_msg)

        # Replace falsy container values with fresh empty containers so downstream
        # code can rely on them being iterable/subscriptable
        if not self.cookies:
            self.cookies = []
        if not self.extra_flags:
            self.extra_flags = []
        if not self.selector_config:
            self.selector_config = {}
        if not self.additional_args:
            self.additional_args = {}

        if self.init_script is not None:
            validation_msg = _is_invalid_file_path(self.init_script)
            if validation_msg:
                raise ValueError(validation_msg)
class StealthConfig(PlaywrightConfig, kw_only=True, frozen=False, weakref=True):
    """Playwright configuration extended with stealth/anti-detection options."""

    allow_webgl: bool = True
    hide_canvas: bool = False
    block_webrtc: bool = False
    solve_cloudflare: bool = False

    def __post_init__(self):
        """Run the base-class validation, then adjust the timeout for Cloudflare."""
        super().__post_init__()
        # Solving Cloudflare challenges needs at least a one-minute timeout
        if self.solve_cloudflare and self.timeout < 60_000:
            self.timeout = 60_000
@dataclass
class _fetch_params:
    """A dataclass of all parameters used by `fetch` calls.

    Holds the merged result of per-call keyword arguments and session-level
    configuration (see ``validate_fetch``). Field order matters: instances are
    built with ``_fetch_params(**result)`` but the order is part of the
    dataclass's generated ``__init__`` signature.
    """

    google_search: bool
    timeout: Seconds
    wait: Seconds
    page_action: Optional[Callable]
    extra_headers: Optional[Dict[str, str]]
    disable_resources: bool
    wait_selector: Optional[str]
    wait_selector_state: SelectorWaitStates
    network_idle: bool
    load_dom: bool
    blocked_domains: Optional[Set[str]]
    # solve_cloudflare exists here even though PlaywrightConfig lacks it;
    # validate_fetch defaults it to False for that model
    solve_cloudflare: bool
    selector_config: Dict
def validate_fetch(
    method_kwargs: Dict | PlaywrightFetchParams | StealthFetchParams,
    session: Any,
    model: type[PlaywrightConfig] | type[StealthConfig],
) -> _fetch_params:  # pragma: no cover
    """Merge per-call fetch kwargs with the session's stored config into `_fetch_params`.

    Keys present in ``method_kwargs`` are validated against ``model`` and take
    precedence; any remaining fields fall back to the session's ``_config``
    attributes when available.
    """
    result: Dict[str, Any] = {}
    overrides: Dict[str, Any] = {}
    kwargs_dict: Dict[str, Any] = dict(method_kwargs)

    # Get all field names that _fetch_params needs
    fetch_param_fields = {f.name for f in fields(_fetch_params)}
    for key in fetch_param_fields:
        if key in kwargs_dict:
            overrides[key] = kwargs_dict[key]
        elif hasattr(session, "_config") and hasattr(session._config, key):
            result[key] = getattr(session._config, key)

    if overrides:
        validated_config = validate(overrides, model)
        # Extract ONLY the fields that were actually overridden (not all fields)
        # This prevents validated defaults from overwriting session config values
        validated_dict = {
            field: getattr(validated_config, field) for field in overrides.keys() if hasattr(validated_config, field)
        }
        # Preserve solve_cloudflare if the user explicitly provided it, even if the model doesn't have it
        if "solve_cloudflare" in overrides:
            validated_dict["solve_cloudflare"] = overrides["solve_cloudflare"]
        # Start with session defaults, then overwrite with validated overrides
        result.update(validated_dict)

    # solve_cloudflare defaults to False for models that don't have it (PlaywrightConfig)
    result.setdefault("solve_cloudflare", False)
    result.setdefault("blocked_domains", None)
    return _fetch_params(**result)
def _collect_struct_defaults(model) -> Dict:
    """Collect the non-factory default values declared on a msgspec Struct model."""
    collected: Dict = {}
    if hasattr(model, "__struct_defaults__") and hasattr(model, "__struct_fields__"):
        for field_name, default_value in zip(model.__struct_fields__, model.__struct_defaults__):  # type: ignore
            # Skip factory defaults - these are msgspec._core.Factory instances
            if type(default_value).__name__ != "Factory":
                collected[field_name] = default_value
    return collected


# Cache default values for each model to reduce validation overhead
models_default_values = {_model.__name__: _collect_struct_defaults(_model) for _model in (StealthConfig, PlaywrightConfig)}
def _filter_defaults(params: Dict, model: str) -> Dict:
    """Filter out parameters that match their default values to reduce validation overhead."""
    defaults = models_default_values[model]
    filtered: Dict = {}
    for key, value in params.items():
        # Keep the value when the model has no default for it, or when it differs
        if key not in defaults or value != defaults[key]:
            filtered[key] = value
    return filtered
@overload
def validate(params: Dict, model: type[StealthConfig]) -> StealthConfig: ...


@overload
def validate(params: Dict, model: type[PlaywrightConfig]) -> PlaywrightConfig: ...


def validate(params: Dict, model: type[PlaywrightConfig] | type[StealthConfig]) -> PlaywrightConfig | StealthConfig:
    """Convert a raw params dict into a validated config instance.

    Raises TypeError (chained from msgspec's ValidationError) on invalid input.
    """
    # Parameters equal to their declared defaults don't need validating — strip
    # them first to speed up the msgspec conversion
    remaining = _filter_defaults(params, model.__name__)
    try:
        return convert(remaining, model)
    except ValidationError as e:
        raise TypeError(f"Invalid argument type: {e}") from e
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/_browsers/_validators.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:scrapling/engines/toolbelt/convertor.py | from functools import lru_cache
from re import compile as re_compile
from curl_cffi.requests import Response as CurlResponse
from playwright._impl._errors import Error as PlaywrightError
from playwright.sync_api import Page as SyncPage, Response as SyncResponse
from playwright.async_api import Page as AsyncPage, Response as AsyncResponse
from scrapling.core.utils import log
from .custom import Response, StatusText
from scrapling.core._types import Dict, Optional
__CHARSET_RE__ = re_compile(r"charset=([\w-]+)")
class ResponseFactory:
    """
    Factory class for creating `Response` objects from various sources.
    This class provides multiple static and instance methods for building standardized `Response` objects
    from diverse input sources such as Playwright responses, asynchronous Playwright responses,
    and raw HTTP request responses. It supports handling response histories, constructing the proper
    response objects, and managing encoding, headers, cookies, and other attributes.
    """

    @classmethod
    @lru_cache(maxsize=16)
    def __extract_browser_encoding(cls, content_type: str | None, default: str = "utf-8") -> str:
        """Extract browser encoding from headers.
        Ex: from header "content-type: text/html; charset=utf-8" -> "utf-8"
        """
        if content_type:
            # Because Playwright can't do that by themselves like all libraries for some reason :3
            match = __CHARSET_RE__.search(content_type)
            return match.group(1) if match else default
        return default

    @classmethod
    def _process_response_history(cls, first_response: SyncResponse, parser_arguments: Dict) -> list[Response]:
        """Process response history to build a list of `Response` objects.

        Walks the redirect chain backwards via `request.redirected_from`;
        `insert(0, ...)` keeps the resulting list ordered oldest redirect first.
        """
        history: list[Response] = []
        current_request = first_response.request.redirected_from
        try:
            while current_request:
                try:
                    current_response = current_request.response()
                    history.insert(
                        0,
                        Response(
                            **{
                                "url": current_request.url,
                                # using current_response.text() will trigger "Error: Response.text: Response body is unavailable for redirect responses"
                                "content": "",
                                # 301 is the fallback status when the redirect's response object is unavailable
                                "status": current_response.status if current_response else 301,
                                "reason": (current_response.status_text or StatusText.get(current_response.status))
                                if current_response
                                else StatusText.get(301),
                                "encoding": cls.__extract_browser_encoding(
                                    current_response.headers.get("content-type", "")
                                )
                                if current_response
                                else "utf-8",
                                "cookies": tuple(),
                                "headers": current_response.all_headers() if current_response else {},
                                "request_headers": current_request.all_headers(),
                                **parser_arguments,
                            }
                        ),
                    )
                except Exception as e:  # pragma: no cover
                    log.error(f"Error processing redirect: {e}")
                    break
                current_request = current_request.redirected_from
        except Exception as e:  # pragma: no cover
            log.error(f"Error processing response history: {e}")
        return history

    @classmethod
    def from_playwright_response(
        cls,
        page: SyncPage,
        first_response: SyncResponse,
        final_response: Optional[SyncResponse],
        parser_arguments: Dict,
        meta: Optional[Dict] = None,
    ) -> Response:
        """
        Transforms a Playwright response into an internal `Response` object, encapsulating
        the page's content, response status, headers, and relevant metadata.
        The function handles potential issues, such as empty or missing final responses,
        by falling back to the first response if necessary. Encoding and status text
        are also derived from the provided response headers or reasonable defaults.
        Additionally, the page content and cookies are extracted for further use.
        :param page: A synchronous Playwright `Page` instance that represents the current browser page. Required to retrieve the page's URL, cookies, and content.
        :param final_response: The last response received for the given request from the Playwright instance. Typically used as the main response object to derive status, headers, and other metadata.
        :param first_response: An earlier or initial Playwright `Response` object that may serve as a fallback response in the absence of the final one.
        :param parser_arguments: A dictionary containing additional arguments needed for parsing or further customization of the returned `Response`. These arguments are dynamically unpacked into
            the `Response` object.
        :param meta: Additional meta data to be saved with the response.
        :return: A fully populated `Response` object containing the page's URL, content, status, headers, cookies, and other derived metadata.
        :rtype: Response
        """
        # In case we didn't catch a document type somehow
        final_response = final_response if final_response else first_response
        if not final_response:
            raise ValueError("Failed to get a response from the page")

        encoding = cls.__extract_browser_encoding(final_response.headers.get("content-type", ""))
        # PlayWright API sometimes give empty status text for some reason!
        status_text = final_response.status_text or StatusText.get(final_response.status)

        history = cls._process_response_history(first_response, parser_arguments)
        try:
            # HTML documents are rendered, so take the live DOM; anything else gets the raw body
            if "html" in final_response.all_headers().get("content-type", ""):
                page_content = cls._get_page_content(page).encode("utf-8")
            else:
                page_content = final_response.body()
        except Exception as e:  # pragma: no cover
            log.error(f"Error getting page content: {e}")
            page_content = b""

        return Response(
            **{
                "url": page.url,
                "content": page_content,
                "status": final_response.status,
                "reason": status_text,
                "encoding": encoding,
                "cookies": tuple(dict(cookie) for cookie in page.context.cookies()),
                # NOTE: headers/request_headers come from the *first* response, not the final one
                "headers": first_response.all_headers(),
                "request_headers": first_response.request.all_headers(),
                "history": history,
                "meta": meta,
                **parser_arguments,
            }
        )

    @classmethod
    async def _async_process_response_history(
        cls, first_response: AsyncResponse, parser_arguments: Dict
    ) -> list[Response]:
        """Process response history to build a list of `Response` objects.

        Async twin of `_process_response_history`; keep the two in sync.
        """
        history: list[Response] = []
        current_request = first_response.request.redirected_from
        try:
            while current_request:
                try:
                    current_response = await current_request.response()
                    history.insert(
                        0,
                        Response(
                            **{
                                "url": current_request.url,
                                # using current_response.text() will trigger "Error: Response.text: Response body is unavailable for redirect responses"
                                "content": "",
                                "status": current_response.status if current_response else 301,
                                "reason": (current_response.status_text or StatusText.get(current_response.status))
                                if current_response
                                else StatusText.get(301),
                                "encoding": cls.__extract_browser_encoding(
                                    current_response.headers.get("content-type", "")
                                )
                                if current_response
                                else "utf-8",
                                "cookies": tuple(),
                                "headers": await current_response.all_headers() if current_response else {},
                                "request_headers": await current_request.all_headers(),
                                **parser_arguments,
                            }
                        ),
                    )
                except Exception as e:  # pragma: no cover
                    log.error(f"Error processing redirect: {e}")
                    break
                current_request = current_request.redirected_from
        except Exception as e:  # pragma: no cover
            log.error(f"Error processing response history: {e}")
        return history

    @classmethod
    def _get_page_content(cls, page: SyncPage) -> str:
        """
        A workaround for the Playwright issue with `page.content()` on Windows. Ref.: https://github.com/microsoft/playwright/issues/16108
        :param page: The page to extract content from.
        :return: The page's HTML content, or "" when Playwright returns nothing.
        """
        # Retries indefinitely (500 ms between attempts) until content() stops raising
        while True:
            try:
                return page.content() or ""
            except PlaywrightError:
                page.wait_for_timeout(500)
                continue
        return ""  # pyright: ignore  # unreachable; kept to satisfy type checkers

    @classmethod
    async def _get_async_page_content(cls, page: AsyncPage) -> str:
        """
        A workaround for the Playwright issue with `page.content()` on Windows. Ref.: https://github.com/microsoft/playwright/issues/16108
        :param page: The page to extract content from.
        :return: The page's HTML content, or "" when Playwright returns nothing.
        """
        while True:
            try:
                return (await page.content()) or ""
            except PlaywrightError:
                await page.wait_for_timeout(500)
                continue
        return ""  # pyright: ignore  # unreachable; kept to satisfy type checkers

    @classmethod
    async def from_async_playwright_response(
        cls,
        page: AsyncPage,
        first_response: AsyncResponse,
        final_response: Optional[AsyncResponse],
        parser_arguments: Dict,
        meta: Optional[Dict] = None,
    ) -> Response:
        """
        Transforms a Playwright response into an internal `Response` object, encapsulating
        the page's content, response status, headers, and relevant metadata.
        The function handles potential issues, such as empty or missing final responses,
        by falling back to the first response if necessary. Encoding and status text
        are also derived from the provided response headers or reasonable defaults.
        Additionally, the page content and cookies are extracted for further use.
        :param page: An asynchronous Playwright `Page` instance that represents the current browser page. Required to retrieve the page's URL, cookies, and content.
        :param final_response: The last response received for the given request from the Playwright instance. Typically used as the main response object to derive status, headers, and other metadata.
        :param first_response: An earlier or initial Playwright `Response` object that may serve as a fallback response in the absence of the final one.
        :param parser_arguments: A dictionary containing additional arguments needed for parsing or further customization of the returned `Response`. These arguments are dynamically unpacked into
            the `Response` object.
        :param meta: Additional meta data to be saved with the response.
        :return: A fully populated `Response` object containing the page's URL, content, status, headers, cookies, and other derived metadata.
        :rtype: Response
        """
        # In case we didn't catch a document type somehow
        final_response = final_response if final_response else first_response
        if not final_response:
            raise ValueError("Failed to get a response from the page")

        encoding = cls.__extract_browser_encoding(final_response.headers.get("content-type", ""))
        # PlayWright API sometimes give empty status text for some reason!
        status_text = final_response.status_text or StatusText.get(final_response.status)

        history = await cls._async_process_response_history(first_response, parser_arguments)
        try:
            if "html" in (await final_response.all_headers()).get("content-type", ""):
                page_content = (await cls._get_async_page_content(page)).encode("utf-8")
            else:
                page_content = await final_response.body()
        except Exception as e:  # pragma: no cover
            log.error(f"Error getting page content in async: {e}")
            page_content = b""

        return Response(
            **{
                "url": page.url,
                "content": page_content,
                "status": final_response.status,
                "reason": status_text,
                "encoding": encoding,
                "cookies": tuple(dict(cookie) for cookie in await page.context.cookies()),
                "headers": await first_response.all_headers(),
                "request_headers": await first_response.request.all_headers(),
                "history": history,
                "meta": meta,
                **parser_arguments,
            }
        )

    @staticmethod
    def from_http_request(response: CurlResponse, parser_arguments: Dict, meta: Optional[Dict] = None) -> Response:
        """Takes `curl_cffi` response and generates `Response` object from it.

        :param response: `curl_cffi` response object
        :param parser_arguments: Additional arguments to be passed to the `Response` object constructor.
        :param meta: Optional metadata dictionary to attach to the Response.
        :return: A `Response` object that is the same as `Selector` object except it has these added attributes: `status`, `reason`, `cookies`, `headers`, and `request_headers`
        """
        return Response(
            **{
                "url": response.url,
                "content": response.content,
                "status": response.status_code,
                "reason": response.reason,
                "encoding": response.encoding or "utf-8",
                "cookies": dict(response.cookies),
                "headers": dict(response.headers),
                "request_headers": dict(response.request.headers) if response.request else {},
                "method": response.request.method if response.request else "GET",
                "history": response.history,  # https://github.com/lexiforest/curl_cffi/issues/82
                "meta": meta,
                **parser_arguments,
            }
        )
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "scrapling/engines/toolbelt/convertor.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 273,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
D4Vinci/Scrapling:tests/ai/test_ai_mcp.py | import pytest
import pytest_httpbin
from scrapling.core.ai import ScraplingMCPServer, ResponseModel
@pytest_httpbin.use_class_based_httpbin
class TestMCPServer:
    """Test MCP server functionality.

    The `httpbin` fixture used below is injected by the `use_class_based_httpbin`
    class decorator, so every request targets a local httpbin instance.
    """

    @pytest.fixture(scope="class")
    def test_url(self, httpbin):
        # Single URL shared by every test in the class
        return f"{httpbin.url}/html"

    @pytest.fixture
    def server(self):
        # Fresh server instance per test
        return ScraplingMCPServer()

    def test_get_tool(self, server, test_url):
        """Test the get tool method"""
        result = server.get(url=test_url, extraction_type="markdown")
        assert isinstance(result, ResponseModel)
        assert result.status == 200
        assert result.url == test_url

    @pytest.mark.asyncio
    async def test_bulk_get_tool(self, server, test_url):
        """Test the bulk_get tool method"""
        results = await server.bulk_get(urls=(test_url, test_url), extraction_type="html")
        assert len(results) == 2
        assert all(isinstance(r, ResponseModel) for r in results)

    @pytest.mark.asyncio
    async def test_fetch_tool(self, server, test_url):
        """Test the fetch tool method"""
        result = await server.fetch(url=test_url, headless=True)
        assert isinstance(result, ResponseModel)
        assert result.status == 200

    @pytest.mark.asyncio
    async def test_bulk_fetch_tool(self, server, test_url):
        """Test the bulk_fetch tool method"""
        result = await server.bulk_fetch(urls=(test_url, test_url), headless=True)
        assert all(isinstance(r, ResponseModel) for r in result)

    @pytest.mark.asyncio
    async def test_stealthy_fetch_tool(self, server, test_url):
        """Test the stealthy_fetch tool method"""
        result = await server.stealthy_fetch(url=test_url, headless=True)
        assert isinstance(result, ResponseModel)
        assert result.status == 200

    @pytest.mark.asyncio
    async def test_bulk_stealthy_fetch_tool(self, server, test_url):
        """Test the bulk_stealthy_fetch tool method"""
        result = await server.bulk_stealthy_fetch(urls=(test_url, test_url), headless=True)
        assert all(isinstance(r, ResponseModel) for r in result)
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/ai/test_ai_mcp.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/cli/test_cli.py | import pytest
from click.testing import CliRunner
from unittest.mock import patch, MagicMock
import pytest_httpbin
from scrapling.parser import Selector
from scrapling.cli import (
shell, mcp, get, post, put, delete, fetch, stealthy_fetch
)
# NOTE(review): `use_class_based_httpbin` is a *class* decorator from pytest_httpbin
# (it provides an `httpbin` fixture to a test class). Applying it to this
# module-level helper function looks misplaced — verify it should decorate the
# `TestCLI` class (whose `html_url` fixture consumes `httpbin`) instead.
@pytest_httpbin.use_class_based_httpbin
def configure_selector_mock():
    """Helper function to create a properly configured Selector mock"""
    mock_response = MagicMock(spec=Selector)
    mock_response.body = "<html><body>Test content</body></html>"
    mock_response.html_content = "<html><body>Test content</body></html>"
    mock_response.encoding = "utf-8"
    mock_response.get_all_text.return_value = "Test content"
    # css() returns a list containing the mock itself so chained lookups keep working
    mock_response.css.return_value = [mock_response]
    return mock_response
class TestCLI:
    """Test CLI functionality.

    All network-touching fetchers are patched with mocks; the tests verify the
    Click commands wire arguments through and exit cleanly (exit code 0).
    """

    @pytest.fixture
    def html_url(self, httpbin):
        # NOTE(review): relies on the `httpbin` fixture from pytest_httpbin's
        # `use_class_based_httpbin` class decorator — confirm the decorator is
        # actually applied to this class (see note above configure_selector_mock).
        return f"{httpbin.url}/html"

    @pytest.fixture
    def runner(self):
        return CliRunner()

    def test_shell_command(self, runner):
        """Test shell command"""
        with patch('scrapling.core.shell.CustomShell') as mock_shell:
            mock_instance = MagicMock()
            mock_shell.return_value = mock_instance
            result = runner.invoke(shell)
            assert result.exit_code == 0
            mock_instance.start.assert_called_once()

    def test_mcp_command(self, runner):
        """Test MCP command"""
        with patch('scrapling.core.ai.ScraplingMCPServer') as mock_server:
            mock_instance = MagicMock()
            mock_server.return_value = mock_instance
            result = runner.invoke(mcp)
            assert result.exit_code == 0
            mock_instance.serve.assert_called_once()

    def test_extract_get_command(self, runner, tmp_path, html_url):
        """Test extract `get` command"""
        output_file = tmp_path / "output.md"
        with patch('scrapling.fetchers.Fetcher.get') as mock_get:
            mock_response = configure_selector_mock()
            mock_response.status = 200
            mock_get.return_value = mock_response
            result = runner.invoke(
                get,
                [html_url, str(output_file)]
            )
            assert result.exit_code == 0

        # Test with various options
        with patch('scrapling.fetchers.Fetcher.get') as mock_get:
            mock_get.return_value = mock_response
            result = runner.invoke(
                get,
                [
                    html_url,
                    str(output_file),
                    '-H', 'User-Agent: Test',
                    '--cookies', 'session=abc123',
                    '--timeout', '60',
                    '--proxy', 'http://proxy:8080',
                    '-s', '.content',
                    '-p', 'page=1'
                ]
            )
            assert result.exit_code == 0

    def test_extract_post_command(self, runner, tmp_path, html_url):
        """Test extract `post` command"""
        output_file = tmp_path / "output.html"
        with patch('scrapling.fetchers.Fetcher.post') as mock_post:
            mock_response = configure_selector_mock()
            mock_post.return_value = mock_response
            result = runner.invoke(
                post,
                [
                    html_url,
                    str(output_file),
                    '-d', 'key=value',
                    '-j', '{"data": "test"}'
                ]
            )
            assert result.exit_code == 0

    def test_extract_put_command(self, runner, tmp_path, html_url):
        """Test extract `put` command"""
        output_file = tmp_path / "output.html"
        with patch('scrapling.fetchers.Fetcher.put') as mock_put:
            mock_response = configure_selector_mock()
            mock_put.return_value = mock_response
            result = runner.invoke(
                put,
                [
                    html_url,
                    str(output_file),
                    '-d', 'key=value',
                    '-j', '{"data": "test"}'
                ]
            )
            assert result.exit_code == 0

    def test_extract_delete_command(self, runner, tmp_path, html_url):
        """Test extract `delete` command"""
        output_file = tmp_path / "output.html"
        with patch('scrapling.fetchers.Fetcher.delete') as mock_delete:
            mock_response = configure_selector_mock()
            mock_delete.return_value = mock_response
            result = runner.invoke(
                delete,
                [
                    html_url,
                    str(output_file)
                ]
            )
            assert result.exit_code == 0

    def test_extract_fetch_command(self, runner, tmp_path, html_url):
        """Test extract fetch command"""
        output_file = tmp_path / "output.txt"
        with patch('scrapling.fetchers.DynamicFetcher.fetch') as mock_fetch:
            mock_response = configure_selector_mock()
            mock_fetch.return_value = mock_response
            result = runner.invoke(
                fetch,
                [
                    html_url,
                    str(output_file),
                    '--headless',
                    '--timeout', '60000'
                ]
            )
            assert result.exit_code == 0

    def test_extract_stealthy_fetch_command(self, runner, tmp_path, html_url):
        """Test extract stealthy fetch command"""
        output_file = tmp_path / "output.md"
        with patch('scrapling.fetchers.StealthyFetcher.fetch') as mock_fetch:
            mock_response = configure_selector_mock()
            mock_fetch.return_value = mock_response
            result = runner.invoke(
                stealthy_fetch,
                [
                    html_url,
                    str(output_file),
                    '--headless',
                    '--css-selector', 'body',
                    '--timeout', '60000'
                ]
            )
            assert result.exit_code == 0

    def test_invalid_arguments(self, runner, html_url):
        """Test invalid arguments handling"""
        # Missing required arguments
        result = runner.invoke(get)
        assert result.exit_code != 0

        _ = runner.invoke(
            get,
            [html_url, 'output.invalid']
        )
        # Should handle the error gracefully

    def test_impersonate_comma_separated(self, runner, tmp_path, html_url):
        """Test that comma-separated impersonate values are parsed correctly"""
        output_file = tmp_path / "output.md"
        with patch('scrapling.fetchers.Fetcher.get') as mock_get:
            mock_response = configure_selector_mock()
            mock_response.status = 200
            mock_get.return_value = mock_response
            result = runner.invoke(
                get,
                [
                    html_url,
                    str(output_file),
                    '--impersonate', 'chrome,firefox,safari'
                ]
            )
            assert result.exit_code == 0
            # Verify that the impersonate argument was converted to a list
            call_kwargs = mock_get.call_args[1]
            assert isinstance(call_kwargs['impersonate'], list)
            assert call_kwargs['impersonate'] == ['chrome', 'firefox', 'safari']

    def test_impersonate_single_browser(self, runner, tmp_path, html_url):
        """Test that single impersonate value remains as string"""
        output_file = tmp_path / "output.md"
        with patch('scrapling.fetchers.Fetcher.get') as mock_get:
            mock_response = configure_selector_mock()
            mock_response.status = 200
            mock_get.return_value = mock_response
            result = runner.invoke(
                get,
                [
                    html_url,
                    str(output_file),
                    '--impersonate', 'chrome'
                ]
            )
            assert result.exit_code == 0
            # Verify that the impersonate argument remains a string
            call_kwargs = mock_get.call_args[1]
            assert isinstance(call_kwargs['impersonate'], str)
            assert call_kwargs['impersonate'] == 'chrome'
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/cli/test_cli.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 200,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/cli/test_shell_functionality.py | import pytest
from unittest.mock import patch, MagicMock
from scrapling.parser import Selector
from scrapling.core.shell import CustomShell, CurlParser, Convertor
class TestCurlParser:
    """Test curl command parsing"""

    @pytest.fixture
    def parser(self):
        return CurlParser()

    def test_basic_curl_parse(self, parser):
        """Test parsing basic curl commands"""
        # Simple GET
        curl_cmd = 'curl https://example.com'
        request = parser.parse(curl_cmd)
        assert request.url == 'https://example.com'
        assert request.method == 'get'
        assert request.data is None

    def test_curl_with_headers(self, parser):
        """Test parsing curl with headers"""
        curl_cmd = '''curl https://example.com \
            -H "User-Agent: Mozilla/5.0" \
            -H "Accept: application/json"'''
        request = parser.parse(curl_cmd)
        assert request.headers['User-Agent'] == 'Mozilla/5.0'
        assert request.headers['Accept'] == 'application/json'

    def test_curl_with_data(self, parser):
        """Test parsing curl with data"""
        # Form data
        curl_cmd = 'curl https://example.com -X POST -d "key=value&foo=bar"'
        request = parser.parse(curl_cmd)
        assert request.method == 'post'
        assert request.data == 'key=value&foo=bar'

        # JSON data (--data-raw with a JSON payload fills json_data, not data)
        curl_cmd = """curl https://example.com -X POST --data-raw '{"key": "value"}'"""
        request = parser.parse(curl_cmd)
        assert request.json_data == {"key": "value"}

    def test_curl_with_cookies(self, parser):
        """Test parsing curl with cookies"""
        # Cookies from the Cookie header and from -b are merged into one mapping
        curl_cmd = '''curl https://example.com \
            -H "Cookie: session=abc123; user=john" \
            -b "extra=cookie"'''
        request = parser.parse(curl_cmd)
        assert request.cookies['session'] == 'abc123'
        assert request.cookies['user'] == 'john'
        assert request.cookies['extra'] == 'cookie'

    def test_curl_with_proxy(self, parser):
        """Test parsing curl with proxy"""
        # -x sets the proxy URL; -U supplies user:pass which gets embedded into it
        curl_cmd = 'curl https://example.com -x http://proxy:8080 -U user:pass'
        request = parser.parse(curl_cmd)
        assert 'http://user:pass@proxy:8080' in request.proxy['http']

    def test_curl2fetcher(self, parser):
        """Test converting curl to fetcher request"""
        with patch('scrapling.fetchers.Fetcher.get') as mock_get:
            mock_response = MagicMock()
            mock_get.return_value = mock_response
            curl_cmd = 'curl https://example.com'
            _ = parser.convert2fetcher(curl_cmd)
            mock_get.assert_called_once()

    def test_invalid_curl_commands(self, parser):
        """Test handling invalid curl commands"""
        # Invalid format
        with pytest.raises(AttributeError):
            parser.parse('not a curl command')
class TestConvertor:
    """Exercise Convertor's content extraction and file output paths."""

    @pytest.fixture
    def sample_html(self):
        """Small HTML document shared by every conversion test."""
        return """
        <html>
            <body>
                <div class="content">
                    <h1>Title</h1>
                    <p>Some text content</p>
                </div>
            </body>
        </html>
        """

    def test_extract_markdown(self, sample_html):
        """Markdown extraction renders the <h1> as a setext heading."""
        chunks = list(Convertor._extract_content(Selector(sample_html), "markdown"))
        assert chunks
        assert "Title\n=====" in chunks[0]  # Markdown conversion

    def test_extract_html(self, sample_html):
        """HTML extraction keeps the raw markup intact."""
        chunks = list(Convertor._extract_content(Selector(sample_html), "html"))
        assert chunks
        assert "<h1>Title</h1>" in chunks[0]

    def test_extract_text(self, sample_html):
        """Text extraction strips tags but keeps the visible text."""
        chunks = list(Convertor._extract_content(Selector(sample_html), "text"))
        assert chunks
        assert "Title" in chunks[0]
        assert "Some text content" in chunks[0]

    def test_extract_with_selector(self, sample_html):
        """A CSS selector narrows the extraction to matching elements."""
        chunks = list(
            Convertor._extract_content(
                Selector(sample_html),
                "text",
                css_selector=".content",
            )
        )
        assert chunks

    def test_write_to_file(self, sample_html, tmp_path):
        """Each supported extension produces an output file on disk."""
        page = Selector(sample_html)
        for filename in ("output.md", "output.html", "output.txt"):
            target = tmp_path / filename
            Convertor.write_content_to_file(page, str(target))
            assert target.exists()

    def test_invalid_operations(self, sample_html):
        """Unknown extraction types and bad filenames raise ValueError."""
        page = Selector(sample_html)
        # Invalid extraction type
        with pytest.raises(ValueError):
            list(Convertor._extract_content(page, "invalid"))
        # Empty filename
        with pytest.raises(ValueError):
            Convertor.write_content_to_file(page, "")
        # Unknown file extension
        with pytest.raises(ValueError):
            Convertor.write_content_to_file(page, "output.xyz")
class TestCustomShell:
    """Sanity checks for the interactive shell wrapper."""

    def test_shell_initialization(self):
        """A new shell starts with DEBUG logging and no loaded pages."""
        shell = CustomShell(code="", log_level="debug")
        assert shell.log_level == 10  # logging.DEBUG
        assert shell.page is None
        assert len(shell.pages) == 0

    def test_shell_namespace(self):
        """The shell namespace exposes all the expected helpers."""
        namespace = CustomShell(code="").get_namespace()
        for helper in ('get', 'post', 'Fetcher', 'DynamicFetcher', 'view', 'uncurl'):
            assert helper in namespace
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/cli/test_shell_functionality.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/core/test_shell_core.py | import pytest
from scrapling.core.shell import (
_CookieParser,
_ParseHeaders,
Request,
_known_logging_levels,
)
class TestCookieParser:
    """Exercise _CookieParser against assorted cookie strings."""

    def test_simple_cookie_parsing(self):
        """A single name=value pair yields exactly one tuple."""
        parsed = list(_CookieParser("session_id=abc123"))
        assert parsed == [("session_id", "abc123")]

    def test_multiple_cookies_parsing(self):
        """Semicolon-separated cookies are all returned."""
        parsed = list(_CookieParser("session_id=abc123; theme=dark; lang=en"))
        assert len(parsed) == 3
        as_dict = dict(parsed)
        assert as_dict["session_id"] == "abc123"
        assert as_dict["theme"] == "dark"
        assert as_dict["lang"] == "en"

    def test_cookie_with_attributes(self):
        """Attributes such as Path/HttpOnly/Secure are dropped from the result."""
        parsed = list(_CookieParser("session_id=abc123; Path=/; HttpOnly; Secure"))
        assert parsed == [("session_id", "abc123")]

    def test_empty_cookie_string(self):
        """An empty string parses to no cookies at all."""
        assert list(_CookieParser("")) == []

    def test_malformed_cookie_handling(self):
        """Malformed input must not raise; a list always comes back."""
        parsed = list(_CookieParser("invalid_cookie_format"))
        assert isinstance(parsed, list)
class TestParseHeaders:
    """Test header parsing functionality"""
    # _ParseHeaders returns a (headers, cookies) pair; several tests below
    # deliberately keep their assertions loose because the exact stripping /
    # cookie-splitting behavior is implementation-defined (see inline notes).
    def test_simple_headers(self):
        """Test parsing simple headers"""
        header_lines = [
            "Content-Type: text/html",
            "Content-Length: 1234",
            "User-Agent: TestAgent/1.0"
        ]
        headers, cookies = _ParseHeaders(header_lines)
        assert headers["Content-Type"] == "text/html"
        assert headers["Content-Length"] == "1234"
        assert headers["User-Agent"] == "TestAgent/1.0"
        # No Set-Cookie lines, so no cookies should be produced.
        assert len(cookies) == 0
    def test_headers_with_cookies(self):
        """Test parsing headers with cookie headers"""
        header_lines = [
            "Content-Type: text/html",
            "Set-Cookie: session_id=abc123",
            "Set-Cookie: theme=dark; Path=/",
        ]
        headers, cookies = _ParseHeaders(header_lines)
        assert headers["Content-Type"] == "text/html"
        assert "Set-Cookie" in headers  # Should contain the first Set-Cookie
        # Cookie parsing behavior depends on implementation
    def test_headers_without_colons(self):
        """Test headers without colons"""
        header_lines = [
            "Content-Type: text/html",
            "InvalidHeader;",  # Header ending with semicolon
        ]
        headers, cookies = _ParseHeaders(header_lines)
        assert headers["Content-Type"] == "text/html"
        # A colon-less line ending in ';' is accepted with an empty value.
        assert "InvalidHeader" in headers
        assert headers["InvalidHeader"] == ""
    def test_invalid_header_format(self):
        """Test invalid header format raises error"""
        header_lines = [
            "Content-Type: text/html",
            "InvalidHeaderWithoutColon",  # No colon, no semicolon
        ]
        with pytest.raises(ValueError, match="Could not parse header without colon"):
            _ParseHeaders(header_lines)
    def test_headers_with_multiple_colons(self):
        """Test headers with multiple colons"""
        header_lines = [
            "Authorization: Bearer: token123",
            "X-Custom: value:with:colons",
        ]
        headers, cookies = _ParseHeaders(header_lines)
        # Only the first colon splits the name from the value.
        assert headers["Authorization"] == "Bearer: token123"
        assert headers["X-Custom"] == "value:with:colons"
    def test_headers_with_whitespace(self):
        """Test headers with extra whitespace"""
        header_lines = [
            " Content-Type : text/html ",
            "\tUser-Agent\t:\tTestAgent/1.0\t",
        ]
        headers, cookies = _ParseHeaders(header_lines)
        # Should handle whitespace correctly
        # NOTE(review): these assertions accept either stripped or raw forms
        # because the stripping contract is not pinned down — confirm intent.
        assert "Content-Type" in headers or " Content-Type " in headers
        assert "text/html" in str(headers.values()) or " text/html " in str(headers.values())
    def test_parse_cookies_disabled(self):
        """Test parsing with cookies disabled"""
        header_lines = [
            "Content-Type: text/html",
            "Set-Cookie: session_id=abc123",
        ]
        headers, cookies = _ParseHeaders(header_lines, parse_cookies=False)
        assert headers["Content-Type"] == "text/html"
        # Cookie parsing behavior when disabled
        assert len(cookies) == 0 or "Set-Cookie" in headers
    def test_empty_header_lines(self):
        """Test parsing empty header lines"""
        headers, cookies = _ParseHeaders([])
        assert len(headers) == 0
        assert len(cookies) == 0
class TestRequestNamedTuple:
    """Exercise the Request namedtuple container."""

    def test_request_creation(self):
        """Keyword construction stores every field exactly as given."""
        request = Request(
            method="GET",
            url="https://example.com",
            params={"q": "test"},
            data=None,
            json_data=None,
            headers={"User-Agent": "Test"},
            cookies={"session": "abc123"},
            proxy=None,
            follow_redirects=True,
        )
        assert request.method == "GET"
        assert request.url == "https://example.com"
        assert request.params == {"q": "test"}
        assert request.headers == {"User-Agent": "Test"}
        assert request.follow_redirects is True

    def test_request_defaults(self):
        """None/empty field values round-trip unchanged."""
        request = Request(
            method="POST",
            url="https://api.example.com",
            params=None,
            data='{"key": "value"}',
            json_data={"key": "value"},
            headers={},
            cookies={},
            proxy="http://proxy:8080",
            follow_redirects=False,
        )
        assert request.method == "POST"
        assert request.data == '{"key": "value"}'
        assert request.json_data == {"key": "value"}
        assert request.proxy == "http://proxy:8080"
        assert request.follow_redirects is False

    def test_request_field_access(self):
        """Fields are reachable both by name and by position."""
        request = Request(
            "GET", "https://example.com", {}, None, None, {}, {}, None, True
        )
        # Every declared field is addressable by attribute name.
        for field in (
            'method', 'url', 'params', 'data', 'json_data',
            'headers', 'cookies', 'proxy', 'follow_redirects',
        ):
            assert hasattr(request, field)
        # Positional access still works because it is a namedtuple.
        assert request[0] == "GET"
        assert request[1] == "https://example.com"
class TestLoggingLevels:
    """Validate the _known_logging_levels lookup table."""

    def test_known_logging_levels(self):
        """Every expected alias maps to an integer level."""
        for alias in ("debug", "info", "warning", "error", "critical", "fatal"):
            assert alias in _known_logging_levels
            assert isinstance(_known_logging_levels[alias], int)

    def test_logging_level_values(self):
        """Each alias resolves to the matching stdlib logging constant."""
        import logging
        expected = {
            "debug": logging.DEBUG,
            "info": logging.INFO,
            "warning": logging.WARNING,
            "error": logging.ERROR,
            "critical": logging.CRITICAL,
            "fatal": logging.FATAL,
        }
        for alias, value in expected.items():
            assert _known_logging_levels[alias] == value

    def test_level_hierarchy(self):
        """Severity values strictly increase from debug up to critical."""
        ordered = [
            _known_logging_levels[name]
            for name in ("debug", "info", "warning", "error", "critical")
        ]
        assert all(a < b for a, b in zip(ordered, ordered[1:]))
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/core/test_shell_core.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/core/test_storage_core.py | import tempfile
import os
from scrapling.core.storage import SQLiteStorageSystem
class TestSQLiteStorageSystem:
    """Test SQLiteStorageSystem functionality"""

    def test_sqlite_storage_creation(self):
        """An in-memory database is enough to construct the storage."""
        storage = SQLiteStorageSystem(storage_file=":memory:")
        assert storage is not None

    def test_sqlite_storage_with_file(self):
        """A file-backed database is created on disk and cleaned up.

        Fix: the temporary file handle is now closed *before* the storage
        opens the path. With `delete=False`, keeping the NamedTemporaryFile
        handle open while SQLite opens the same path fails on Windows (the
        file cannot be opened a second time while still held open).
        """
        with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as tmp_file:
            db_path = tmp_file.name
        # Handle is closed here; SQLite can open the path on any platform.
        storage = None
        try:
            storage = SQLiteStorageSystem(storage_file=db_path)
            assert storage is not None
            assert os.path.exists(db_path)
        finally:
            # Close the database connection before deleting (required on Windows)
            if storage is not None:
                storage.close()
            if os.path.exists(db_path):
                os.unlink(db_path)

    def test_sqlite_storage_initialization_args(self):
        """Test SQLite storage with various initialization arguments"""
        # Test with URL parameter
        storage = SQLiteStorageSystem(
            storage_file=":memory:",
            url="https://example.com"
        )
        assert storage is not None
        assert storage.url == "https://example.com"
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/core/test_storage_core.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/async/test_dynamic.py | import pytest
import pytest_httpbin
from scrapling import DynamicFetcher
DynamicFetcher.adaptive = True
@pytest_httpbin.use_class_based_httpbin
class TestDynamicFetcherAsync:
    """Async integration tests for DynamicFetcher against a local httpbin."""

    @pytest.fixture
    def fetcher(self):
        """The DynamicFetcher class under test."""
        return DynamicFetcher

    @pytest.fixture
    def urls(self, httpbin):
        """Endpoints on the local httpbin instance used by the tests."""
        return {
            "status_200": f"{httpbin.url}/status/200",
            "status_404": f"{httpbin.url}/status/404",
            "status_501": f"{httpbin.url}/status/501",
            "basic_url": f"{httpbin.url}/get",
            "html_url": f"{httpbin.url}/html",
            "delayed_url": f"{httpbin.url}/delay/10",
            "cookies_url": f"{httpbin.url}/cookies/set/test/value",
        }

    @pytest.mark.asyncio
    async def test_basic_fetch(self, fetcher, urls):
        """Test doing a basic fetch request with multiple statuses"""
        response = await fetcher.async_fetch(urls["status_200"])
        assert response.status == 200

    @pytest.mark.asyncio
    async def test_cookies_loading(self, fetcher, urls):
        """Test if cookies are set after the request"""
        response = await fetcher.async_fetch(urls["cookies_url"])
        cookies = {response.cookies[0]['name']: response.cookies[0]['value']}
        assert cookies == {"test": "value"}

    @pytest.mark.asyncio
    async def test_automation(self, fetcher, urls):
        """Test if automation breaks the code or not"""
        async def scroll_page(page):
            await page.mouse.wheel(10, 0)
            await page.mouse.move(100, 400)
            await page.mouse.up()
            return page
        response = await fetcher.async_fetch(urls["html_url"], page_action=scroll_page)
        assert response.status == 200

    @pytest.mark.parametrize(
        "kwargs",
        [
            {"real_chrome": True, "disable_resources": True},
            {"wait_selector": "h1", "wait_selector_state": "attached"},
            {"wait_selector": "h1", "wait_selector_state": "visible"},
            {
                "google_search": True,
                "real_chrome": True,
                "wait": 10,
                "locale": "en-US",
                "extra_headers": {"ayo": ""},
                "useragent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:131.0) Gecko/20100101 Firefox/131.0",
                "cookies": [{"name": "test", "value": "123", "domain": "example.com", "path": "/"}],
                "network_idle": True,
                "selector_config": {"keep_comments": False, "keep_cdata": False},
            },
        ],
    )
    @pytest.mark.asyncio
    async def test_properties(self, fetcher, urls, kwargs):
        """Test if different arguments break the code or not"""
        response = await fetcher.async_fetch(urls["html_url"], **kwargs)
        assert response.status == 200

    @pytest.mark.asyncio
    async def test_cdp_url_invalid(self, fetcher, urls):
        """Test if invalid CDP URLs raise appropriate exceptions"""
        # Fix: this test previously repeated the same TypeError assertion
        # twice, verbatim; the duplicate has been removed.
        # A non-URL string must be rejected with TypeError.
        with pytest.raises(TypeError):
            await fetcher.async_fetch(urls["html_url"], cdp_url="blahblah")
        # A well-formed but unreachable ws:// endpoint should still fail.
        with pytest.raises(Exception):
            await fetcher.async_fetch(urls["html_url"], cdp_url="ws://blahblah")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/async/test_dynamic.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/async/test_dynamic_session.py | import pytest
import asyncio
import pytest_httpbin
from scrapling.fetchers import AsyncDynamicSession
@pytest_httpbin.use_class_based_httpbin
@pytest.mark.asyncio
class TestAsyncDynamicSession:
    """Test AsyncDynamicSession"""
    # The `AsyncDynamicSession` is inheriting from `DynamicSession` class so no need to repeat all the tests
    @pytest.fixture
    def urls(self, httpbin):
        # Endpoints on the local httpbin instance used by every test below.
        return {
            "basic": f"{httpbin.url}/get",
            "html": f"{httpbin.url}/html",
        }
    async def test_concurrent_async_requests(self, urls):
        """Test concurrent requests with async session"""
        async with AsyncDynamicSession(max_pages=3) as session:
            # Launch multiple concurrent requests
            tasks = [
                session.fetch(urls["basic"]),
                session.fetch(urls["html"]),
                session.fetch(urls["basic"])
            ]
            assert session.max_pages == 3
            assert session.page_pool.max_pages == 3
            assert session.context is not None
            responses = await asyncio.gather(*tasks)
            # All should succeed
            assert all(r.status == 200 for r in responses)
            # Check pool stats
            stats = session.get_pool_stats()
            assert stats["total_pages"] <= 3
        # After exit, should be closed
        assert session._is_alive is False
        # Should raise RuntimeError when used after closing
        with pytest.raises(RuntimeError):
            await session.fetch(urls["basic"])
    async def test_page_pool_management(self, urls):
        """Test page pool creation and reuse"""
        async with AsyncDynamicSession() as session:
            # The first request creates a page
            response = await session.fetch(urls["basic"])
            assert response.status == 200
            # NOTE(review): pages_count is asserted to be 0 right after a
            # successful fetch — presumably pages are closed/returned before
            # fetch() returns; confirm against the session implementation.
            assert session.page_pool.pages_count == 0
            # The second request should reuse the page
            response = await session.fetch(urls["html"])
            assert response.status == 200
            assert session.page_pool.pages_count == 0
            # Check pool stats
            stats = session.get_pool_stats()
            assert stats["total_pages"] == 0
            assert stats["max_pages"] == 1
    async def test_dynamic_session_with_options(self, urls):
        """Test AsyncDynamicSession with various options"""
        async with AsyncDynamicSession(
            headless=False,
            disable_resources=True,
            extra_headers={"X-Test": "value"}
        ) as session:
            response = await session.fetch(urls["html"])
            assert response.status == 200
    async def test_error_handling_in_fetch(self, urls):
        """Test error handling during fetch"""
        async with AsyncDynamicSession() as session:
            # Test with invalid URL
            with pytest.raises(Exception):
                await session.fetch("invalid://url")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/async/test_dynamic_session.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/async/test_requests.py | import pytest
import pytest_httpbin
from scrapling.fetchers import AsyncFetcher
AsyncFetcher.adaptive = True
@pytest_httpbin.use_class_based_httpbin
@pytest.mark.asyncio
class TestAsyncFetcher:
    """HTTP-verb coverage for AsyncFetcher against a local httpbin server."""

    # Keyword combinations applied to every verb to make sure none of them
    # break the request machinery.
    _OPTION_SETS = (
        {"stealthy_headers": True},
        {"follow_redirects": True},
        {"timeout": None},
        {"stealthy_headers": True, "follow_redirects": True, "timeout": None},
    )

    @pytest.fixture(scope="class")
    def fetcher(self):
        """The AsyncFetcher class under test."""
        return AsyncFetcher

    @pytest.fixture(scope="class")
    def urls(self, httpbin):
        """Endpoints on the class-scoped httpbin instance."""
        return {
            "status_200": f"{httpbin.url}/status/200",
            "status_404": f"{httpbin.url}/status/404",
            "status_501": f"{httpbin.url}/status/501",
            "basic_url": f"{httpbin.url}/get",
            "post_url": f"{httpbin.url}/post",
            "put_url": f"{httpbin.url}/put",
            "delete_url": f"{httpbin.url}/delete",
            "html_url": f"{httpbin.url}/html",
        }

    async def test_basic_get(self, fetcher, urls):
        """GET reports exactly the status code the server returned."""
        for key, expected in (("status_200", 200), ("status_404", 404), ("status_501", 501)):
            assert (await fetcher.get(urls[key])).status == expected

    async def test_get_properties(self, fetcher, urls):
        """GET succeeds with every supported keyword combination."""
        for options in self._OPTION_SETS:
            assert (await fetcher.get(urls["status_200"], **options)).status == 200

    async def test_post_properties(self, fetcher, urls):
        """POST with a body succeeds with every keyword combination."""
        assert (await fetcher.post(urls["post_url"], data={"key": "value"})).status == 200
        for options in self._OPTION_SETS:
            response = await fetcher.post(urls["post_url"], data={"key": "value"}, **options)
            assert response.status == 200

    async def test_put_properties(self, fetcher, urls):
        """PUT succeeds (or is rejected with 405) for every combination."""
        assert (await fetcher.put(urls["put_url"], data={"key": "value"})).status in [200, 405]
        for options in self._OPTION_SETS:
            response = await fetcher.put(urls["put_url"], data={"key": "value"}, **options)
            assert response.status in [200, 405]

    async def test_delete_properties(self, fetcher, urls):
        """DELETE succeeds with every keyword combination."""
        for options in self._OPTION_SETS:
            assert (await fetcher.delete(urls["delete_url"], **options)).status == 200
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/async/test_requests.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/async/test_requests_session.py |
from scrapling.engines.static import AsyncFetcherClient
class TestFetcherSession:
    """Checks for the one-shot AsyncFetcherClient."""

    def test_async_fetcher_client_creation(self):
        """The client disables async context-manager usage entirely."""
        client = AsyncFetcherClient()
        # Both dunder hooks are nulled out so `async with` cannot be used.
        for hook in (client.__aenter__, client.__aexit__):
            assert hook is None
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/async/test_requests_session.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/sync/test_dynamic.py | import pytest
import pytest_httpbin
from scrapling import DynamicFetcher
DynamicFetcher.adaptive = True
@pytest_httpbin.use_class_based_httpbin
class TestDynamicFetcher:
    """Sync integration tests for DynamicFetcher against a local httpbin."""

    @pytest.fixture(scope="class")
    def fetcher(self):
        """Fixture to create a StealthyFetcher instance for the entire test class"""
        return DynamicFetcher

    @pytest.fixture(autouse=True)
    def setup_urls(self, httpbin):
        """Fixture to set up URLs for testing"""
        self.status_200 = f"{httpbin.url}/status/200"
        self.status_404 = f"{httpbin.url}/status/404"
        self.status_501 = f"{httpbin.url}/status/501"
        self.basic_url = f"{httpbin.url}/get"
        self.html_url = f"{httpbin.url}/html"
        self.delayed_url = f"{httpbin.url}/delay/10"  # 10 Seconds delay response
        self.cookies_url = f"{httpbin.url}/cookies/set/test/value"

    def test_basic_fetch(self, fetcher):
        """Test doing a basic fetch request with multiple statuses"""
        assert fetcher.fetch(self.status_200).status == 200
        # There's a bug with playwright makes it crashes if a URL returns status code 4xx/5xx without body, let's disable this till they reply to my issue report
        # assert fetcher.fetch(self.status_404).status == 404
        # assert fetcher.fetch(self.status_501).status == 501

    def test_cookies_loading(self, fetcher):
        """Test if cookies are set after the request"""
        response = fetcher.fetch(self.cookies_url)
        cookies = {response.cookies[0]['name']: response.cookies[0]['value']}
        assert cookies == {"test": "value"}

    def test_automation(self, fetcher):
        """Test if automation breaks the code or not"""
        def scroll_page(page):
            page.mouse.wheel(10, 0)
            page.mouse.move(100, 400)
            page.mouse.up()
            return page
        assert fetcher.fetch(self.html_url, page_action=scroll_page).status == 200

    @pytest.mark.parametrize(
        "kwargs",
        [
            {"disable_resources": True, "real_chrome": True},
            {"wait_selector": "h1", "wait_selector_state": "attached"},
            {"wait_selector": "h1", "wait_selector_state": "visible"},
            {
                "google_search": True,
                "real_chrome": True,
                "wait": 10,
                "locale": "en-US",
                "timezone_id": "America/New_York",
                "extra_headers": {"ayo": ""},
                "useragent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:131.0) Gecko/20100101 Firefox/131.0",
                "cookies": [{"name": "test", "value": "123", "domain": "example.com", "path": "/"}],
                "network_idle": True,
                "selector_config": {"keep_comments": False, "keep_cdata": False},
            },
        ],
    )
    def test_properties(self, fetcher, kwargs):
        """Test if different arguments break the code or not"""
        response = fetcher.fetch(self.html_url, **kwargs)
        assert response.status == 200

    def test_cdp_url_invalid(self, fetcher):
        """Test if invalid CDP URLs raise appropriate exceptions"""
        # Fix: this test previously repeated the same TypeError assertion
        # twice, verbatim; the duplicate has been removed.
        # A non-URL string must be rejected with TypeError.
        with pytest.raises(TypeError):
            fetcher.fetch(self.html_url, cdp_url="blahblah")
        # A well-formed but unreachable ws:// endpoint should still fail.
        with pytest.raises(Exception):
            fetcher.fetch(self.html_url, cdp_url="ws://blahblah")
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/sync/test_dynamic.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/sync/test_requests.py | import pytest
import pytest_httpbin
from scrapling import Fetcher
Fetcher.adaptive = True
@pytest_httpbin.use_class_based_httpbin
class TestFetcher:
    """HTTP-verb coverage for the sync Fetcher against a local httpbin server."""

    # Keyword combinations applied to every verb to make sure none of them
    # break the request machinery.
    _OPTION_SETS = (
        {"stealthy_headers": True},
        {"follow_redirects": True},
        {"timeout": None},
        {"stealthy_headers": True, "follow_redirects": True, "timeout": None},
    )

    @pytest.fixture(scope="class")
    def fetcher(self):
        """Fixture to create a Fetcher instance for the entire test class"""
        return Fetcher

    @pytest.fixture(autouse=True)
    def setup_urls(self, httpbin):
        """Fixture to set up URLs for testing"""
        self.status_200 = f"{httpbin.url}/status/200"
        self.status_404 = f"{httpbin.url}/status/404"
        self.status_501 = f"{httpbin.url}/status/501"
        self.basic_url = f"{httpbin.url}/get"
        self.post_url = f"{httpbin.url}/post"
        self.put_url = f"{httpbin.url}/put"
        self.delete_url = f"{httpbin.url}/delete"
        self.html_url = f"{httpbin.url}/html"

    def test_basic_get(self, fetcher):
        """GET reports exactly the status code the server returned."""
        for url, expected in (
            (self.status_200, 200),
            (self.status_404, 404),
            (self.status_501, 501),
        ):
            assert fetcher.get(url).status == expected

    def test_get_properties(self, fetcher):
        """GET succeeds with every supported keyword combination."""
        for options in self._OPTION_SETS:
            assert fetcher.get(self.status_200, **options).status == 200

    def test_post_properties(self, fetcher):
        """POST with a body succeeds with every keyword combination."""
        assert fetcher.post(self.post_url, data={"key": "value"}).status == 200
        for options in self._OPTION_SETS:
            assert fetcher.post(self.post_url, data={"key": "value"}, **options).status == 200

    def test_put_properties(self, fetcher):
        """PUT with a body succeeds with every keyword combination."""
        assert fetcher.put(self.put_url, data={"key": "value"}).status == 200
        for options in self._OPTION_SETS:
            assert fetcher.put(self.put_url, data={"key": "value"}, **options).status == 200

    def test_delete_properties(self, fetcher):
        """DELETE succeeds with every keyword combination."""
        for options in self._OPTION_SETS:
            assert fetcher.delete(self.delete_url, **options).status == 200
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/sync/test_requests.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/sync/test_requests_session.py | import pytest
from scrapling.engines.static import _SyncSessionLogic as FetcherSession, FetcherClient
class TestFetcherSession:
    """Exercise FetcherSession construction and context-manager rules."""

    def test_fetcher_session_creation(self):
        """Constructor arguments populate the session defaults."""
        session = FetcherSession(timeout=30, retries=3, stealthy_headers=True)
        assert session._default_timeout == 30
        assert session._default_retries == 3

    def test_fetcher_session_context_manager(self):
        """Entering the session opens an underlying curl session."""
        session = FetcherSession()
        with session as entered:
            assert entered == session
            assert session._curl_session is not None
        # Session should be cleaned up

    def test_fetcher_session_double_enter(self):
        """Re-entering an already-entered session is a RuntimeError."""
        session = FetcherSession()
        with session:
            with pytest.raises(RuntimeError):
                session.__enter__()

    def test_fetcher_client_creation(self):
        """FetcherClient disables context-manager usage entirely."""
        client = FetcherClient()
        # Both dunder hooks are nulled out so `with` cannot be used.
        for hook in (client.__enter__, client.__exit__):
            assert hook is None
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/sync/test_requests_session.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/test_base.py | import pytest
from scrapling.engines.toolbelt.custom import BaseFetcher
class TestBaseFetcher:
    """Test BaseFetcher configuration functionality"""
    # NOTE(review): these tests mutate BaseFetcher's class-level state and rely
    # on manual configure(...) resets at the end of each test — keep each
    # configure/reset pair intact and in order.
    def test_default_configuration(self):
        """Test default configuration values"""
        config = BaseFetcher.display_config()
        assert config['huge_tree'] is True
        assert config['adaptive'] is False
        assert config['keep_comments'] is False
        assert config['keep_cdata'] is False
    def test_configure_single_parameter(self):
        """Test configuring single parameter"""
        BaseFetcher.configure(adaptive=True)
        config = BaseFetcher.display_config()
        assert config['adaptive'] is True
        # Reset
        BaseFetcher.configure(adaptive=False)
    def test_configure_multiple_parameters(self):
        """Test configuring multiple parameters"""
        BaseFetcher.configure(
            huge_tree=False,
            keep_comments=True,
            adaptive=True
        )
        config = BaseFetcher.display_config()
        assert config['huge_tree'] is False
        assert config['keep_comments'] is True
        assert config['adaptive'] is True
        # Reset
        BaseFetcher.configure(
            huge_tree=True,
            keep_comments=False,
            adaptive=False
        )
    def test_configure_invalid_parameter(self):
        """Test configuring invalid parameter"""
        # Unknown keywords are rejected outright.
        with pytest.raises(ValueError):
            BaseFetcher.configure(invalid_param=True)
    def test_configure_no_parameters(self):
        """Test configure with no parameters"""
        with pytest.raises(AttributeError):
            BaseFetcher.configure()
    def test_configure_non_parser_keyword(self):
        """Test configuring non-parser keyword"""
        with pytest.raises(AttributeError):
            # Assuming there's some attribute that's not in parser_keywords
            BaseFetcher.some_other_attr = "test"
            BaseFetcher.configure(some_other_attr="new_value")
    def test_generate_parser_arguments(self):
        """Test parser arguments generation"""
        BaseFetcher.configure(
            huge_tree=False,
            adaptive=True,
            adaptive_domain="example.com"
        )
        args = BaseFetcher._generate_parser_arguments()
        assert args['huge_tree'] is False
        assert args['adaptive'] is True
        assert args['adaptive_domain'] == "example.com"
        # Reset
        BaseFetcher.configure(
            huge_tree=True,
            adaptive=False
        )
        BaseFetcher.adaptive_domain = None
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_base.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/test_constants.py | from scrapling.engines.constants import EXTRA_RESOURCES, STEALTH_ARGS, HARMFUL_ARGS, DEFAULT_ARGS
class TestConstants:
"""Test constant values"""
def test_default_disabled_resources(self):
"""Test default disabled resources"""
assert "image" in EXTRA_RESOURCES
assert "font" in EXTRA_RESOURCES
assert "stylesheet" in EXTRA_RESOURCES
assert "media" in EXTRA_RESOURCES
def test_harmful_default_args(self):
"""Test harmful default arguments"""
assert "--enable-automation" in HARMFUL_ARGS
assert "--disable-popup-blocking" in HARMFUL_ARGS
def test_flags(self):
"""Test default stealth flags"""
assert "--no-pings" in DEFAULT_ARGS
# assert "--incognito" in STEALTH_ARGS
assert "--disable-blink-features=AutomationControlled" in STEALTH_ARGS
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_constants.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/test_pages.py | import pytest
from unittest.mock import Mock
from scrapling.engines._browsers._page import PageInfo, PagePool
class TestPageInfo:
"""Test PageInfo functionality"""
def test_page_info_creation(self):
"""Test PageInfo creation"""
mock_page = Mock()
page_info = PageInfo(mock_page, "ready", "https://example.com")
assert page_info.page == mock_page
assert page_info.state == "ready"
assert page_info.url == "https://example.com"
def test_page_info_marking(self):
"""Test marking page"""
mock_page = Mock()
page_info = PageInfo(mock_page, "ready", None)
page_info.mark_busy("https://example.com")
assert page_info.state == "busy"
assert page_info.url == "https://example.com"
page_info.mark_error()
assert page_info.state == "error"
def test_page_info_equality(self):
"""Test PageInfo equality comparison"""
mock_page1 = Mock()
mock_page2 = Mock()
page_info1 = PageInfo(mock_page1, "ready", None)
page_info2 = PageInfo(mock_page1, "busy", None) # Same page, different state
page_info3 = PageInfo(mock_page2, "ready", None) # Different page
assert page_info1 == page_info2 # Same page
assert page_info1 != page_info3 # Different page
assert page_info1 != "not a page info" # Different type
def test_page_info_repr(self):
"""Test PageInfo string representation"""
mock_page = Mock()
page_info = PageInfo(mock_page, "ready", "https://example.com")
repr_str = repr(page_info)
assert "ready" in repr_str
assert "https://example.com" in repr_str
class TestPagePool:
"""Test PagePool functionality"""
def test_page_pool_creation(self):
"""Test PagePool creation"""
pool = PagePool(max_pages=5)
assert pool.max_pages == 5
assert pool.pages_count == 0
assert pool.busy_count == 0
def test_add_page(self):
"""Test adding page to pool"""
pool = PagePool(max_pages=2)
mock_page = Mock()
page_info = pool.add_page(mock_page)
assert isinstance(page_info, PageInfo)
assert page_info.page == mock_page
assert page_info.state == "ready"
assert pool.pages_count == 1
def test_add_page_limit_exceeded(self):
"""Test adding page when limit exceeded"""
pool = PagePool(max_pages=1)
# Add first page
pool.add_page(Mock())
# Try to add a second page
with pytest.raises(RuntimeError):
pool.add_page(Mock())
def test_cleanup_error_pages(self):
"""Test cleaning up error pages"""
pool = PagePool(max_pages=3)
# Add pages
page1 = pool.add_page(Mock())
_ = pool.add_page(Mock())
page3 = pool.add_page(Mock())
# Mark some as error
page1.mark_error()
page3.mark_error()
assert pool.pages_count == 3
pool.cleanup_error_pages()
assert pool.pages_count == 1 # Only 2 should remain
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_pages.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/test_response_handling.py | from unittest.mock import Mock
from scrapling.parser import Selector
from scrapling.engines.toolbelt.convertor import ResponseFactory, Response
class TestResponseFactory:
"""Test ResponseFactory functionality"""
def test_response_from_curl(self):
"""Test creating response from curl_cffi response"""
# Mock curl response
mock_curl_response = Mock()
mock_curl_response.url = "https://example.com"
mock_curl_response.content = b"<html><body>Test</body></html>"
mock_curl_response.status_code = 200
mock_curl_response.reason = "OK"
mock_curl_response.encoding = "utf-8"
mock_curl_response.cookies = {"session": "abc"}
mock_curl_response.headers = {"Content-Type": "text/html"}
mock_curl_response.request.headers = {"User-Agent": "Test"}
mock_curl_response.request.method = "GET"
mock_curl_response.history = []
response = ResponseFactory.from_http_request(
mock_curl_response,
{"adaptive": False}
)
assert response.status == 200
assert response.url == "https://example.com"
assert isinstance(response, Response)
def test_response_history_processing(self):
"""Test processing response history"""
# Mock responses with redirects
mock_final = Mock()
mock_final.status = 200
mock_final.status_text = "OK"
mock_final.all_headers = Mock(return_value={})
mock_redirect = Mock()
mock_redirect.url = "https://example.com/redirect"
mock_redirect.response = Mock(return_value=mock_final)
mock_redirect.all_headers = Mock(return_value={})
mock_redirect.redirected_from = None
mock_first = Mock()
mock_first.request.redirected_from = mock_redirect
# Process history
history = ResponseFactory._process_response_history(
mock_first,
{}
)
assert len(history) >= 0 # Should process redirects
class TestErrorScenarios:
"""Test various error scenarios"""
def test_invalid_html_handling(self):
"""Test handling of malformed HTML"""
malformed_html = """
<html>
<body>
<div>Unclosed div
<p>Paragraph without closing tag
<span>Nested unclosed
</body>
"""
# Should handle gracefully
page = Selector(malformed_html)
assert page is not None
# Should still be able to select elements
divs = page.css("div")
assert len(divs) > 0
def test_empty_responses(self):
"""Test handling of empty responses"""
# Empty HTML
page = Selector("")
assert page is not None
# Whitespace only
page = Selector(" \n\t ")
assert page is not None
# Null bytes
page = Selector("Hello\x00World")
assert "Hello" in page.get_all_text()
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_response_handling.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/fetchers/test_validator.py | import pytest
from scrapling.engines._browsers._validators import (
validate,
StealthConfig,
PlaywrightConfig,
)
class TestValidators:
"""Test configuration validators"""
def test_playwright_config_valid(self):
"""Test valid PlaywrightConfig"""
params = {
"max_pages": 2,
"headless": True,
"timeout": 30000,
"proxy": "http://proxy.example.com:8080"
}
config = validate(params, PlaywrightConfig)
assert config.max_pages == 2
assert config.headless is True
assert config.timeout == 30000
assert isinstance(config.proxy, dict)
def test_playwright_config_invalid_max_pages(self):
"""Test PlaywrightConfig with invalid max_pages"""
params = {"max_pages": 0}
with pytest.raises(TypeError):
validate(params, PlaywrightConfig)
params = {"max_pages": 51}
with pytest.raises(TypeError):
validate(params, PlaywrightConfig)
def test_playwright_config_invalid_timeout(self):
"""Test PlaywrightConfig with an invalid timeout"""
params = {"timeout": -1}
with pytest.raises(TypeError):
validate(params, PlaywrightConfig)
def test_playwright_config_invalid_cdp_url(self):
"""Test PlaywrightConfig with invalid CDP URL"""
params = {"cdp_url": "invalid-url"}
with pytest.raises(TypeError):
validate(params, PlaywrightConfig)
def test_stealth_config_valid(self):
"""Test valid StealthConfig"""
params = {
"max_pages": 1,
"headless": True,
"solve_cloudflare": False,
"timeout": 30000
}
config = validate(params, StealthConfig)
assert config.max_pages == 1
assert config.headless is True
assert config.solve_cloudflare is False
assert config.timeout == 30000
def test_stealth_config_cloudflare_timeout(self):
"""Test StealthConfig timeout adjustment for Cloudflare"""
params = {
"solve_cloudflare": True,
"timeout": 10000 # Less than the required 60,000
}
config = validate(params, StealthConfig)
assert config.timeout == 60000 # Should be increased
def test_playwright_config_blocked_domains(self):
"""Test PlaywrightConfig with blocked_domains"""
params = {"blocked_domains": {"ads.example.com", "tracker.io"}}
config = validate(params, PlaywrightConfig)
assert config.blocked_domains == {"ads.example.com", "tracker.io"}
def test_playwright_config_blocked_domains_default_none(self):
"""Test PlaywrightConfig blocked_domains defaults to None"""
config = validate({}, PlaywrightConfig)
assert config.blocked_domains is None
def test_stealth_config_blocked_domains(self):
"""Test StealthConfig inherits blocked_domains"""
params = {"blocked_domains": {"ads.example.com"}}
config = validate(params, StealthConfig)
assert config.blocked_domains == {"ads.example.com"}
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/fetchers/test_validator.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/parser/test_attributes_handler.py | import pytest
import json
from scrapling import Selector
from scrapling.core.custom_types import AttributesHandler
class TestAttributesHandler:
"""Test AttributesHandler functionality"""
@pytest.fixture
def sample_html(self):
return """
<html>
<body>
<div id="main"
class="container active"
data-config='{"theme": "dark", "version": 2.5}'
data-items='[1, 2, 3, 4, 5]'
data-invalid-json='{"broken: json}'
title="Main Container"
style="color: red; background: blue;"
data-empty=""
data-number="42"
data-bool="true"
data-url="https://example.com/page?param=value"
custom-attr="custom-value"
data-nested='{"user": {"name": "John", "age": 30}}'
data-encoded="<div>HTML</div>"
onclick="handleClick()"
data-null="null"
data-undefined="undefined">
Content
</div>
<input type="text"
name="username"
value="test@example.com"
placeholder="Enter email"
required
disabled>
<img src="/images/photo.jpg"
alt="Photo"
width="100"
height="100"
loading="lazy">
</body>
</html>
"""
@pytest.fixture
def attributes(self, sample_html):
page = Selector(sample_html)
element = page.css("#main")[0]
return element.attrib
def test_basic_attribute_access(self, attributes):
"""Test basic attribute access"""
# Dict-like access
assert attributes["id"] == "main"
assert attributes["class"] == "container active"
assert attributes["title"] == "Main Container"
# Key existence
assert "id" in attributes
assert "nonexistent" not in attributes
# Get with default
assert attributes.get("id") == "main"
assert attributes.get("nonexistent") is None
assert attributes.get("nonexistent", "default") == "default"
def test_iteration_methods(self, attributes):
"""Test iteration over attributes"""
# Keys
keys = list(attributes.keys())
assert "id" in keys
assert "class" in keys
assert "data-config" in keys
# Values
values = list(attributes.values())
assert "main" in values
assert "container active" in values
# Items
items = dict(attributes.items())
assert items["id"] == "main"
assert items["class"] == "container active"
# Length
assert len(attributes) > 0
def test_json_parsing(self, attributes):
"""Test JSON parsing from attributes"""
# Valid JSON object
config = attributes["data-config"].json()
assert config["theme"] == "dark"
assert config["version"] == 2.5
# Valid JSON array
items = attributes["data-items"].json()
assert items == [1, 2, 3, 4, 5]
# Nested JSON
nested = attributes["data-nested"].json()
assert nested["user"]["name"] == "John"
assert nested["user"]["age"] == 30
# JSON null
assert attributes["data-null"].json() is None
def test_json_error_handling(self, attributes):
"""Test JSON parsing error handling"""
# Invalid JSON should raise error or return None
with pytest.raises((json.JSONDecodeError, AttributeError)):
attributes["data-invalid-json"].json()
# Non-existent attribute
with pytest.raises(KeyError):
attributes["nonexistent"].json()
def test_json_string_property(self, attributes):
"""Test json_string property"""
# Should return JSON representation of all attributes
json_string = attributes.json_string
assert isinstance(json_string, bytes)
# Parse it back
parsed = json.loads(json_string)
assert parsed["id"] == "main"
assert parsed["class"] == "container active"
def test_search_values(self, attributes):
"""Test search_values method"""
# Exact match
results = list(attributes.search_values("main", partial=False))
assert len(results) == 1
assert "id" in results[0]
# Partial match
results = list(attributes.search_values("container", partial=True))
assert len(results) >= 1
found_keys = []
for result in results:
found_keys.extend(result.keys())
assert "class" in found_keys or "title" in found_keys
# Case sensitivity
results = list(attributes.search_values("MAIN", partial=False))
assert len(results) == 0 # Should be case-sensitive by default
# Multiple matches
results = list(attributes.search_values("2", partial=True))
assert len(results) > 1 # Should find multiple attributes
# No matches
results = list(attributes.search_values("nonexistent", partial=False))
assert len(results) == 0
def test_special_attribute_types(self, sample_html):
"""Test handling of special attribute types"""
page = Selector(sample_html)
# Boolean attributes
input_elem = page.css("input")[0]
assert "required" in input_elem.attrib
assert "disabled" in input_elem.attrib
# Empty attributes
main_elem = page.css("#main")[0]
assert main_elem.attrib["data-empty"] == ""
# Numeric string attributes
assert main_elem.attrib["data-number"] == "42"
assert main_elem.attrib["data-bool"] == "true"
def test_attribute_modification(self, sample_html):
"""Test that AttributesHandler is read-only (if applicable)"""
page = Selector(sample_html)
element = page.css("#main")[0]
attrs = element.attrib
# Test if attributes can be modified
# This behavior depends on implementation
original_id = attrs["id"]
try:
attrs["id"] = "new-id"
# If modification is allowed
assert attrs["id"] == "new-id"
# Reset
attrs["id"] = original_id
except (TypeError, AttributeError):
# If modification is not allowed (read-only)
assert attrs["id"] == original_id
def test_string_representation(self, attributes):
"""Test string representations"""
# __str__
str_repr = str(attributes)
assert isinstance(str_repr, str)
assert "id" in str_repr or "main" in str_repr
# __repr__
repr_str = repr(attributes)
assert isinstance(repr_str, str)
def test_edge_cases(self, sample_html):
"""Test edge cases and special scenarios"""
page = Selector(sample_html)
# Element with no attributes
page_with_no_attrs = Selector("<div>Content</div>")
elem = page_with_no_attrs.css("div")[0]
assert len(elem.attrib) == 0
assert list(elem.attrib.keys()) == []
assert elem.attrib.get("any") is None
# Element with encoded content
main_elem = page.css("#main")[0]
encoded = main_elem.attrib["data-encoded"]
assert "<" in encoded # Should decode it
# Style attribute parsing
style = main_elem.attrib["style"]
assert "color: red" in style
assert "background: blue" in style
def test_url_attribute(self, attributes):
"""Test URL attributes"""
url = attributes["data-url"]
assert url == "https://example.com/page?param=value"
# Could test URL joining if AttributesHandler supports it
# based on the parent element's base URL
def test_comparison_operations(self, sample_html):
"""Test comparison operations if supported"""
page = Selector(sample_html)
elem1 = page.css("#main")[0]
elem2 = page.css("input")[0]
# Different elements should have different attributes
assert elem1.attrib != elem2.attrib
# The same element should have equal attributes
elem1_again = page.css("#main")[0]
assert elem1.attrib == elem1_again.attrib
def test_complex_search_patterns(self, attributes):
"""Test complex search patterns"""
# Search for JSON-containing attributes
json_attrs = []
for key, value in attributes.items():
try:
if isinstance(value, str) and (value.startswith('{') or value.startswith('[')):
json.loads(value)
json_attrs.append(key)
except:
pass
assert "data-config" in json_attrs
assert "data-items" in json_attrs
assert "data-nested" in json_attrs
def test_attribute_filtering(self, attributes):
"""Test filtering attributes by patterns"""
# Get all data-* attributes
data_attrs = {k: v for k, v in attributes.items() if k.startswith("data-")}
assert len(data_attrs) > 5
assert "data-config" in data_attrs
assert "data-items" in data_attrs
# Get all event handler attributes
event_attrs = {k: v for k, v in attributes.items() if k.startswith("on")}
assert "onclick" in event_attrs
def test_performance_with_many_attributes(self):
"""Test performance with elements having many attributes"""
# Create an element with many attributes
attrs_list = [f'data-attr{i}="value{i}"' for i in range(100)]
html = f'<div id="test" {" ".join(attrs_list)}>Content</div>'
page = Selector(html)
element = page.css("#test")[0]
attribs = element.attrib
# Should handle many attributes efficiently
assert len(attribs) == 101 # id + 100 data attributes
# Search should still work efficiently
results = list(attribs.search_values("value50", partial=False))
assert len(results) == 1
def test_unicode_attributes(self):
"""Test handling of Unicode in attributes"""
html = """
<div id="unicode-test"
data-emoji="😀🎉"
data-chinese="你好世界"
data-arabic="مرحبا بالعالم"
data-special="café naïve">
</div>
"""
page = Selector(html)
attrs = page.css("#unicode-test")[0].attrib
assert attrs["data-emoji"] == "😀🎉"
assert attrs["data-chinese"] == "你好世界"
assert attrs["data-arabic"] == "مرحبا بالعالم"
assert attrs["data-special"] == "café naïve"
# Search with Unicode
results = list(attrs.search_values("你好", partial=True))
assert len(results) == 1
def test_malformed_attributes(self):
"""Test handling of malformed attributes"""
# Various malformed HTML scenarios
test_cases = [
'<div id="test" class=>Content</div>', # Empty attribute value
'<div id="test" class>Content</div>', # No attribute value
'<div id="test" data-"invalid"="value">Content</div>', # Invalid attribute name
'<div id=test class=no-quotes>Content</div>', # Unquoted values
]
for html in test_cases:
try:
page = Selector(html)
if page.css("div"):
attrs = page.css("div")[0].attrib
# Should handle gracefully without crashing
assert isinstance(attrs, AttributesHandler)
except:
# Some malformed HTML might not parse at all
pass
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/parser/test_attributes_handler.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 280,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
D4Vinci/Scrapling:tests/parser/test_parser_advanced.py | import re
import pytest
from unittest.mock import Mock
from scrapling import Selector, Selectors
from scrapling.core.custom_types import TextHandler, TextHandlers
from scrapling.core.storage import SQLiteStorageSystem
class TestSelectorAdvancedFeatures:
"""Test advanced Selector features like adaptive matching"""
def test_adaptive_initialization_with_storage(self):
"""Test adaptive initialization with custom storage"""
html = "<html><body><p>Test</p></body></html>"
# Use the actual SQLiteStorageSystem for this test
selector = Selector(
content=html,
adaptive=True,
storage=SQLiteStorageSystem,
storage_args={"storage_file": ":memory:", "url": "https://example.com"}
)
assert selector._Selector__adaptive_enabled is True
assert selector._storage is not None
def test_adaptive_initialization_with_default_storage_args(self):
"""Test adaptive initialization with default storage args"""
html = "<html><body><p>Test</p></body></html>"
url = "https://example.com"
# Test that adaptive mode uses default storage when no explicit args provided
selector = Selector(
content=html,
url=url,
adaptive=True
)
# Should create storage with default args
assert selector._storage is not None
def test_adaptive_with_existing_storage(self):
"""Test adaptive initialization with existing storage object"""
html = "<html><body><p>Test</p></body></html>"
mock_storage = Mock()
selector = Selector(
content=html,
adaptive=True,
_storage=mock_storage
)
assert selector._storage is mock_storage
class TestAdvancedSelectors:
"""Test advanced selector functionality"""
@pytest.fixture
def complex_html(self):
return """
<html>
<body>
<div class="container" data-test='{"key": "value"}'>
<p>First paragraph</p>
<!-- Comment -->
<p>Second paragraph</p>
<![CDATA[Some CDATA content]]>
<div class="nested">
<span id="special">Special content</span>
<span>Regular content</span>
</div>
<table>
<tr><td>Cell 1</td><td>Cell 2</td></tr>
<tr><td>Cell 3</td><td>Cell 4</td></tr>
</table>
</div>
</body>
</html>
"""
def test_comment_and_cdata_handling(self, complex_html):
"""Test handling of comments and CDATA"""
# With comments/CDATA kept
page = Selector(
complex_html,
keep_comments=True,
keep_cdata=True
)
content = page.body
assert "Comment" in content
assert "CDATA" in content
# Without comments/CDATA
page = Selector(
complex_html,
keep_comments=False,
keep_cdata=False
)
content = page.html_content
assert "Comment" not in content
def test_advanced_xpath_variables(self, complex_html):
"""Test XPath with variables"""
page = Selector(complex_html)
# Using XPath variables
cells = page.xpath(
"//td[text()=$cell_text]",
cell_text="Cell 1"
)
assert len(cells) == 1
assert cells[0].text == "Cell 1"
def test_pseudo_elements(self, complex_html):
"""Test CSS pseudo-elements"""
page = Selector(complex_html)
# ::text pseudo-element
texts = page.css("p::text")
assert len(texts) == 2
assert isinstance(texts[0], Selector)
assert isinstance(texts[0].get(), TextHandler)
# ::attr() pseudo-element
attrs = page.css("div::attr(class)")
assert "container" in attrs.getall()
def test_complex_attribute_operations(self, complex_html):
"""Test complex attribute handling"""
page = Selector(complex_html)
container = page.css(".container")[0]
# JSON in attributes
data = container.attrib["data-test"].json()
assert data["key"] == "value"
# Attribute searching
matches = list(container.attrib.search_values("container"))
assert len(matches) == 1
def test_url_joining(self):
"""Test URL joining functionality"""
page = Selector("<html></html>", url="https://example.com/page")
# Relative URL
assert page.urljoin("../other") == "https://example.com/other"
assert page.urljoin("/absolute") == "https://example.com/absolute"
assert page.urljoin("relative") == "https://example.com/relative"
def test_find_operations_edge_cases(self, complex_html):
"""Test edge cases in find operations"""
page = Selector(complex_html)
# Multiple argument types
_ = page.find_all(
"span",
["div"],
{"class": "nested"},
lambda e: e.text != ""
)
# Regex pattern matching
pattern = re.compile(r"Cell \d+")
cells = page.find_all(pattern)
assert len(cells) == 4
def test_text_operations_edge_cases(self, complex_html):
"""Test text operation edge cases"""
page = Selector(complex_html)
# get_all_text with a custom separator
text = page.get_all_text(separator=" | ", strip=True)
assert " | " in text
# Ignore specific tags
text = page.get_all_text(ignore_tags=("table",))
assert "Cell" not in text
# With empty values
text = page.get_all_text(valid_values=False)
assert text != ""
class TestTextHandlerAdvanced:
"""Test advanced TextHandler functionality"""
def test_text_handler_operations(self):
"""Test various TextHandler operations"""
text = TextHandler(" Hello World ")
# All string methods should return TextHandler
assert isinstance(text.strip(), TextHandler)
assert isinstance(text.upper(), TextHandler)
assert isinstance(text.lower(), TextHandler)
assert isinstance(text.replace("World", "Python"), TextHandler)
# Custom methods
assert text.clean() == "Hello World"
# Sorting
text2 = TextHandler("dcba")
assert text2.sort() == "abcd"
def test_text_handler_regex(self):
"""Test regex operations on TextHandler"""
text = TextHandler("Price: $10.99, Sale: $8.99")
# Basic regex
prices = text.re(r"\$[\d.]+")
assert len(prices) == 2
assert prices[0] == "$10.99"
# Case insensitive
text2 = TextHandler("HELLO hello HeLLo")
matches = text2.re(r"hello", case_sensitive=False)
assert len(matches) == 3
# Clean match
text3 = TextHandler(" He l lo ")
matches = text3.re(r"He l lo", clean_match=True, case_sensitive=False)
assert len(matches) == 1
def test_text_handlers_operations(self):
"""Test TextHandlers list operations"""
handlers = TextHandlers([
TextHandler("First"),
TextHandler("Second"),
TextHandler("Third")
])
# Slicing should return TextHandlers
assert isinstance(handlers[0:2], TextHandlers)
# Get methods
assert handlers.get() == "First"
assert handlers.get("default") == "First"
assert TextHandlers([]).get("default") == "default"
class TestSelectorsAdvanced:
"""Test advanced Selectors functionality"""
def test_selectors_filtering(self):
"""Test filtering operations on Selectors"""
html = """
<div>
<p class="highlight">Important</p>
<p>Regular</p>
<p class="highlight">Also important</p>
</div>
"""
page = Selector(html)
paragraphs = page.css("p")
# Filter by class
highlighted = paragraphs.filter(lambda p: p.has_class("highlight"))
assert len(highlighted) == 2
# Search for a specific element
found = paragraphs.search(lambda p: p.text == "Regular")
assert found is not None
assert found.text == "Regular"
def test_selectors_properties(self):
"""Test Selectors properties"""
html = "<div><p>1</p><p>2</p><p>3</p></div>"
page = Selector(html)
paragraphs = page.css("p")
assert paragraphs.first.text == "1"
assert paragraphs.last.text == "3"
assert paragraphs.length == 3
| {
"repo_id": "D4Vinci/Scrapling",
"file_path": "tests/parser/test_parser_advanced.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 220,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:AI/racecheck_repro_1d_bulk.py | """Minimal reproducer: cp.async.bulk (raw address) triggers racecheck hazard.
Warp 0 loads via cp.async.bulk, warp 1 reads from smem after mbarrier wait.
Pipeline is correctly synchronized but racecheck reports 1 error.
python AI/racecheck_repro_1d_bulk.py # correctness
CUTE_DSL_LINEINFO=1 compute-sanitizer --tool=racecheck python AI/racecheck_repro_1d_bulk.py # 1 error
"""
import cutlass
import cutlass.cute as cute
from cutlass.cute.nvgpu import cpasync
from cutlass.cute.runtime import from_dlpack
from cutlass import Float32, Int32
import cutlass.pipeline
from cutlass.pipeline.sm90 import PipelineTmaAsync, make_pipeline_state
import cuda.bindings.driver as cuda
import torch
N_BLKS, TILE = 4, 128
N_STG = 2
@cute.kernel
def kernel(g_src: cute.Tensor, g_dst: cute.Tensor):
smem = cutlass.utils.SmemAllocator()
s = smem.allocate_tensor(Float32, cute.make_layout((TILE, N_STG)), byte_alignment=128)
s_mbar = smem.allocate_tensor(cutlass.Int64, cute.make_layout(2 * N_STG), byte_alignment=8)
tidx, _, _ = cute.arch.thread_idx()
warp, lane = tidx // 32, tidx % 32
pipe = PipelineTmaAsync.create(
barrier_storage=s_mbar.iterator, num_stages=N_STG,
producer_group=cutlass.pipeline.CooperativeGroup(cutlass.pipeline.Agent.Thread, 1),
consumer_group=cutlass.pipeline.CooperativeGroup(cutlass.pipeline.Agent.Thread, 1),
tx_count=TILE * 4, defer_sync=False,
)
src = cute.local_tile(g_src, (TILE,), (None,))
dst = cute.local_tile(g_dst, (TILE,), (None,))
if warp == 0:
ps = make_pipeline_state(cutlass.pipeline.PipelineUserType.Producer, N_STG)
for blk in cutlass.range(N_BLKS, unroll=1):
pipe.producer_acquire(ps)
atom = cute.make_copy_atom(cpasync.CopyBulkG2SOp(), Float32)
with cute.arch.elect_one():
cute.copy(atom, src[None, blk], s[None, ps.index],
mbar_ptr=pipe.producer_get_barrier(ps))
ps.advance()
pipe.producer_tail(ps)
if warp == 1:
cs = make_pipeline_state(cutlass.pipeline.PipelineUserType.Consumer, N_STG)
for blk in cutlass.range(N_BLKS, unroll=1):
pipe.consumer_wait(cs)
for i in cutlass.range_constexpr(TILE // 32):
dst[lane + i * 32, blk] = s[lane + i * 32, cs.index]
cute.arch.fence_view_async_shared()
cute.arch.sync_warp() # Ned sync_warp as only 1 thread will signal in consumer_release
pipe.consumer_release(cs)
cs.advance()
@cute.jit
def go(g_src, g_dst, stream):
kernel(g_src, g_dst).launch(grid=[1, 1, 1], block=[64, 1, 1], smem=4096, stream=stream)
if __name__ == "__main__":
src = torch.arange(TILE * N_BLKS, device="cuda", dtype=torch.float32)
dst = torch.zeros_like(src)
go(from_dlpack(src, assumed_align=16), from_dlpack(dst, assumed_align=16),
cuda.CUstream(torch.cuda.current_stream().cuda_stream))
torch.cuda.synchronize()
assert torch.equal(src, dst), f"FAIL: max diff={torch.abs(src - dst).max().item()}"
print("PASS")
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "AI/racecheck_repro_1d_bulk.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Dao-AILab/flash-attention:AI/racecheck_repro_1d_tensor.py | """Minimal reproducer: cp.async.bulk.tensor.1d (descriptor TMA) passes racecheck.
Same pipeline as racecheck_repro_1d_bulk.py but uses make_tiled_tma_atom to
create a TMA descriptor, which generates cp.async.bulk.tensor.1d PTX.
python AI/racecheck_repro_1d_tensor.py # correctness
CUTE_DSL_LINEINFO=1 compute-sanitizer --tool=racecheck python AI/racecheck_repro_1d_tensor.py # 0 hazards
"""
import cutlass
import cutlass.cute as cute
from cutlass.cute.nvgpu import cpasync
from cutlass.cute.runtime import from_dlpack
from cutlass import Float32, Int32
import cutlass.pipeline
from cutlass.pipeline.sm90 import PipelineTmaAsync, make_pipeline_state
import cuda.bindings.driver as cuda
import torch
N_BLKS, TILE = 4, 128
N_STG = 2
@cute.kernel
def kernel(g_dst: cute.Tensor, tma_atom: cute.CopyAtom, tma_tensor: cute.Tensor):
    """Single-CTA producer/consumer copy via a TMA descriptor.

    Warp 0 (producer) issues TMA bulk-tensor loads of TILE-float tiles into a
    2-stage shared-memory ring; warp 1 (consumer) drains each filled stage
    into g_dst. Stage hand-off uses the PipelineTmaAsync mbarrier protocol.
    """
    smem = cutlass.utils.SmemAllocator()
    # Staging ring: TILE floats per stage, N_STG stages.
    s = smem.allocate_tensor(Float32, cute.make_layout((TILE, N_STG)), byte_alignment=128)
    # Barrier storage for the pipeline; 2 * N_STG Int64 slots — presumably one
    # full + one empty mbarrier per stage (confirm against PipelineTmaAsync).
    s_mbar = smem.allocate_tensor(cutlass.Int64, cute.make_layout(2 * N_STG), byte_alignment=8)
    tidx, _, _ = cute.arch.thread_idx()
    warp, lane = tidx // 32, tidx % 32
    pipe = PipelineTmaAsync.create(
        barrier_storage=s_mbar.iterator, num_stages=N_STG,
        producer_group=cutlass.pipeline.CooperativeGroup(cutlass.pipeline.Agent.Thread, 1),
        consumer_group=cutlass.pipeline.CooperativeGroup(cutlass.pipeline.Agent.Thread, 1),
        # tx_count: bytes expected per TMA transaction (TILE Float32 elements).
        tx_count=TILE * 4, defer_sync=False,
    )
    # Partition smem / gmem into per-stage TMA views (CTA coordinate 0,
    # trivial multicast layout).
    tma_s, tma_g = cpasync.tma_partition(
        tma_atom, Int32(0), cute.make_layout(1),
        cute.group_modes(s, 0, 1),
        cute.group_modes(cute.local_tile(tma_tensor, (TILE,), (None,)), 0, 1),
    )
    dst = cute.local_tile(g_dst, (TILE,), (None,))
    if warp == 0:
        # One thread prefetches the TMA descriptor into the cache.
        with cute.arch.elect_one():
            cpasync.prefetch_descriptor(tma_atom)
    if warp == 0:
        # Producer: acquire a free stage, issue the TMA load for tile `blk`,
        # and let the transaction barrier signal completion.
        ps = make_pipeline_state(cutlass.pipeline.PipelineUserType.Producer, N_STG)
        for blk in cutlass.range(N_BLKS, unroll=1):
            pipe.producer_acquire(ps)
            cute.copy(tma_atom, tma_g[None, blk], tma_s[None, ps.index],
                      tma_bar_ptr=pipe.producer_get_barrier(ps))
            ps.advance()
        pipe.producer_tail(ps)
    if warp == 1:
        # Consumer: wait for a filled stage, copy it to global memory one
        # 32-lane chunk at a time, then release the stage to the producer.
        cs = make_pipeline_state(cutlass.pipeline.PipelineUserType.Consumer, N_STG)
        for blk in cutlass.range(N_BLKS, unroll=1):
            pipe.consumer_wait(cs)
            for i in cutlass.range_constexpr(TILE // 32):
                dst[lane + i * 32, blk] = s[lane + i * 32, cs.index]
            cute.arch.fence_view_async_shared()
            cute.arch.sync_warp() # Need sync_warp as only 1 thread will signal in consumer_release
            pipe.consumer_release(cs)
            cs.advance()
@cute.jit
def go(g_src, g_dst, stream):
    # Build a descriptor-based TMA copy atom for one TILE-wide 1-D tile of
    # g_src — this is what makes the kernel emit cp.async.bulk.tensor.1d PTX
    # (versus the plain bulk-copy variant in racecheck_repro_1d_bulk.py).
    tma_atom, tma_tensor = cpasync.make_tiled_tma_atom(
        cpasync.CopyBulkTensorTileG2SOp(), g_src, cute.make_layout(TILE), (TILE,),
    )
    # One CTA, 64 threads (producer warp + consumer warp), 4 KiB smem.
    kernel(g_dst, tma_atom, tma_tensor).launch(
        grid=[1, 1, 1], block=[64, 1, 1], smem=4096, stream=stream,
    )
if __name__ == "__main__":
    # Known pattern in src, zeroed dst: any missed stage shows as a mismatch.
    src = torch.arange(TILE * N_BLKS, device="cuda", dtype=torch.float32)
    dst = torch.zeros_like(src)
    # Run the JIT entry on the current torch stream (16-byte alignment assumed
    # by the TMA descriptor).
    go(from_dlpack(src, assumed_align=16), from_dlpack(dst, assumed_align=16),
       cuda.CUstream(torch.cuda.current_stream().cuda_stream))
    torch.cuda.synchronize()
    assert torch.equal(src, dst), f"FAIL: max diff={torch.abs(src - dst).max().item()}"
    print("PASS")
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "AI/racecheck_repro_1d_tensor.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Dao-AILab/flash-attention:flash_attn/cute/cache_utils.py | # Manage Ahead-of-Time (AOT) compiled kernels
import fcntl
import hashlib
import logging
import os
import pickle
import sys
import tempfile
import time
from distutils.ccompiler import CCompiler, new_compiler
from functools import lru_cache
from getpass import getuser
from pathlib import Path
from typing import Hashable, TypeAlias
import cutlass
import cutlass.cute as cute
import tvm_ffi
from cutlass.cutlass_dsl import JitCompiledFunction
CompileKeyType: TypeAlias = tuple[Hashable, ...]  # hashable tuple identifying one compiled variant
CallableFunction: TypeAlias = JitCompiledFunction | tvm_ffi.Function  # in-process JIT result or a loaded .so entry
# Module logger emits to stderr; WARNING by default, raise to DEBUG to trace cache hits/misses.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.WARNING)
# Enable cache via `FLASH_ATTENTION_CUTE_DSL_CACHE_ENABLED=1`
CUTE_DSL_CACHE_ENABLED: bool = os.getenv("FLASH_ATTENTION_CUTE_DSL_CACHE_ENABLED", "0") == "1"
# Customize cache dir via `FLASH_ATTENTION_CUTE_DSL_CACHE_DIR`, default is
# `/tmp/${USER}/flash_attention_cute_dsl_cache`
CUTE_DSL_CACHE_DIR: str | None = os.getenv("FLASH_ATTENTION_CUTE_DSL_CACHE_DIR", None)
def get_cache_path() -> Path:
    """Resolve the on-disk cache root, creating it if necessary.

    Honors ``FLASH_ATTENTION_CUTE_DSL_CACHE_DIR`` when set; otherwise uses a
    per-user directory under the system temp dir.
    """
    default_root = Path(tempfile.gettempdir()) / getuser() / "flash_attention_cute_dsl_cache"
    target = default_root if CUTE_DSL_CACHE_DIR is None else Path(CUTE_DSL_CACHE_DIR)
    target.mkdir(parents=True, exist_ok=True)
    return target
@lru_cache(maxsize=1)
def _compute_source_fingerprint() -> str:
    """
    Hash all CuTe Python sources plus runtime ABI stamps into a short fingerprint.

    The fingerprint changes whenever:
    - Any .py file under flash_attn/cute is added, removed, renamed, or modified.
    - The Python minor version changes (e.g. 3.13 -> 3.14).
    - The cutlass or tvm_ffi package version changes.

    Computed once per process and cached.
    """
    digest = hashlib.sha256()
    # ABI stamps first: interpreter minor version plus dependency versions.
    digest.update(f"py{sys.version_info.major}.{sys.version_info.minor}".encode())
    digest.update(f"cutlass={cutlass.__version__}".encode())
    digest.update(f"tvm_ffi={tvm_ffi.__version__}".encode())
    # Then every source file: relative path, length, and raw bytes, walked in
    # sorted order so the digest is deterministic across runs.
    package_root = Path(__file__).resolve().parent
    for source_file in sorted(package_root.rglob("*.py")):
        digest.update(source_file.relative_to(package_root).as_posix().encode())
        payload = source_file.read_bytes()
        digest.update(len(payload).to_bytes(8, "little"))
        digest.update(payload)
    return digest.hexdigest()
class FileLock:
    """Context manager for advisory file locks using fcntl.flock.

    Supports exclusive (write) and shared (read) locks.
    Always blocks with polling until the lock is acquired or timeout is reached.

    Usage:
        with FileLock(lock_path, exclusive=True, timeout=15, label="abc"):
            # do work under lock
    """

    # Seconds slept between non-blocking acquisition attempts.
    _POLL_INTERVAL: float = 0.1

    def __init__(
        self,
        lock_path: Path,
        exclusive: bool,
        timeout: float = 15,
        label: str = "",
    ):
        """
        Args:
            lock_path: Path to the lock file on disk.
            exclusive: True for exclusive (write) lock, False for shared (read) lock.
            timeout: Max seconds to wait for lock acquisition before raising RuntimeError.
            label: Optional human-readable label for error messages.
        """
        self.lock_path: Path = lock_path
        self.exclusive: bool = exclusive
        self.timeout: float = timeout
        self.label: str = label
        # Open file descriptor while the lock is held; -1 otherwise.
        # (Fix: the original annotated this as int but assigned None on the
        # failure path and tested `is not None` in __exit__.)
        self._fd: int = -1

    @property
    def _lock_label(self) -> str:
        # Human-readable lock description used in timeout error messages.
        kind = "exclusive" if self.exclusive else "shared"
        return f"{kind} {self.label}" if self.label else kind

    def __enter__(self) -> "FileLock":
        """Acquire the lock, polling until success or timeout.

        Raises:
            RuntimeError: if the lock could not be acquired within `timeout`.
        """
        open_flags = (
            os.O_WRONLY | os.O_CREAT if self.exclusive else os.O_RDONLY | os.O_CREAT
        )
        lock_type = fcntl.LOCK_EX if self.exclusive else fcntl.LOCK_SH
        self._fd = os.open(str(self.lock_path), open_flags)
        deadline = time.monotonic() + self.timeout
        # Attempt-first loop: always try at least once, so an uncontended lock
        # is acquired even with timeout <= 0. (The original deadline-first
        # `while time.monotonic() < deadline` loop never attempted the lock
        # in that case and timed out immediately.)
        while True:
            try:
                fcntl.flock(self._fd, lock_type | fcntl.LOCK_NB)
                return self
            except OSError:
                if time.monotonic() >= deadline:
                    break
                time.sleep(self._POLL_INTERVAL)
        os.close(self._fd)
        self._fd = -1
        raise RuntimeError(
            f"Timed out after {self.timeout}s waiting for "
            f"{self._lock_label} lock: {self.lock_path}"
        )

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Release and close only if we actually hold an open descriptor.
        if self._fd >= 0:
            fcntl.flock(self._fd, fcntl.LOCK_UN)
            os.close(self._fd)
            self._fd = -1
class JITCache:
    """
    In-memory cache for compiled functions.
    """

    def __init__(self):
        # key -> compiled callable; unbounded, lives for the process lifetime.
        self.cache: dict[CompileKeyType, CallableFunction] = {}

    def __contains__(self, key: CompileKeyType) -> bool:
        # Membership test against the in-memory mapping only.
        return key in self.cache

    def __getitem__(self, key: CompileKeyType) -> CallableFunction:
        # Raises KeyError for keys that were never stored.
        return self.cache[key]

    def __setitem__(self, key: CompileKeyType, fn: JitCompiledFunction) -> None:
        # Store (or overwrite) the compiled function under this key.
        self.cache[key] = fn

    def clear(self) -> None:
        """
        Clear in-memory cache of compiled functions
        """
        self.cache.clear()
class JITPersistentCache(JITCache):
    """
    In-memory cache for compiled functions, which is also backed by persistent storage.
    Use cutedsl ahead-of-time (AOT) compilation, only supporting enable_tvm_ffi=True
    """
    # Symbol name the compiled function is exported under inside each .so.
    EXPORT_FUNCTION_PREFIX = "func"
    # Max seconds to wait on a per-entry file lock before giving up.
    LOCK_TIMEOUT_SECONDS = 15
    # Lazily created, class-shared compiler used only to link .o -> .so.
    _compiler: CCompiler | None = None
    def __init__(self, cache_path: Path):
        """Create the cache rooted at `cache_path`, creating the directory if needed."""
        super().__init__()
        cache_path.mkdir(parents=True, exist_ok=True)
        self.cache_path: Path = cache_path
    def __setitem__(self, key: CompileKeyType, fn: JitCompiledFunction) -> None:
        # Store in memory first, then mirror to disk for other processes.
        JITCache.__setitem__(self, key, fn)
        self._try_export_to_storage(key, fn)
    def __getitem__(self, key: CompileKeyType) -> CallableFunction:
        # Use __contains__ to try populating in-memory cache with persistent storage
        self.__contains__(key)
        return JITCache.__getitem__(self, key)
    def __contains__(self, key: CompileKeyType) -> bool:
        # Checks in-memory cache first, then tries loading from storage.
        # When returning True, guarantees the in-memory cache is populated.
        if JITCache.__contains__(self, key):
            return True
        return self._try_load_from_storage(key)
    def _try_load_from_storage(self, key: CompileKeyType) -> bool:
        """
        Try to load a function from persistent storage into in-memory cache.
        Returns True if loaded successfully, False if not found on disk.
        Holds a shared lock during loading to prevent concurrent writes.
        """
        sha256_hex = self._key_to_hash(key)
        so_path = self.cache_path / f"{sha256_hex}.so"
        # Shared lock: many readers may load concurrently, but none while a
        # writer holds the exclusive lock in _try_export_to_storage.
        with FileLock(
            self._lock_path(sha256_hex),
            exclusive=False,
            timeout=self.LOCK_TIMEOUT_SECONDS,
            label=sha256_hex,
        ):
            if so_path.exists():
                logger.debug(
                    "Loading compiled function from disk: %s", so_path
                )
                m = cute.runtime.load_module(
                    str(so_path), enable_tvm_ffi=True
                )
                fn = getattr(m, self.EXPORT_FUNCTION_PREFIX)
                JITCache.__setitem__(self, key, fn)
                return True
            else:
                logger.debug(
                    "Cache miss on disk for key hash %s", sha256_hex
                )
                return False
    def _try_export_to_storage(
        self, key: CompileKeyType, fn: JitCompiledFunction
    ) -> None:
        """Export a compiled function to persistent storage under exclusive lock."""
        sha256_hex = self._key_to_hash(key)
        with FileLock(
            self._lock_path(sha256_hex),
            exclusive=True,
            timeout=self.LOCK_TIMEOUT_SECONDS,
            label=sha256_hex,
        ):
            so_path = self.cache_path / f"{sha256_hex}.so"
            if so_path.exists():
                # Another process already exported.
                logger.debug(
                    "Skipping export, already on disk: %s", so_path
                )
                return
            obj_path = self.cache_path / f"{sha256_hex}.o"
            logger.debug(
                "Exporting compiled function to disk: %s", so_path
            )
            fn.export_to_c(
                object_file_path=str(obj_path),
                function_name=self.EXPORT_FUNCTION_PREFIX,
            )
            # TODO: as of cutedsl 4.4.0, `export_to_c` only supports exporting
            # "relocatable" .o files. But tvm_ffi expects "shared library" .so
            # files. Link ourselves to workaround.
            if JITPersistentCache._compiler is None:
                JITPersistentCache._compiler = new_compiler()
            JITPersistentCache._compiler.link_shared_object(
                [str(obj_path)], str(so_path)
            )
            # Remove the intermediate object file once the .so is linked.
            obj_path.unlink()
            logger.debug(
                "Successfully exported compiled function to disk: %s", so_path
            )
    def _key_to_hash(self, key: CompileKeyType) -> str:
        # Stable content hash of the pickled key; used for .so/.lock filenames.
        return hashlib.sha256(pickle.dumps(key)).hexdigest()
    def _lock_path(self, sha256_hex: str) -> Path:
        # Per-entry lock file living next to the entry's .so artifact.
        return self.cache_path / f"{sha256_hex}.lock"
    def clear(self) -> None:
        """
        Not only clear the in-memory cache. Also purge persistent compilation cache.
        """
        logger.debug(
            "Clearing persistent cache at %s", self.cache_path
        )
        super().clear()
        # NOTE(review): unlink() assumes cache_path contains only flat files
        # (.so/.o/.lock); it raises if a subdirectory exists here — confirm.
        for child in self.cache_path.iterdir():
            child.unlink()
def get_jit_cache(name: str | None = None) -> JITCache:
    """
    JIT cache factory.

    `name` is an optional identifier to create subdirectories to manage cache.
    When persistent caching is enabled, artifacts are namespaced under a
    source fingerprint directory so that code or dependency changes
    automatically invalidate stale entries.
    """
    # Guard clause: without the env flag we stay purely in-memory.
    if not CUTE_DSL_CACHE_ENABLED:
        logger.debug("Persistent cache disabled, using in-memory JIT cache")
        return JITCache()
    cache_root = get_cache_path() / _compute_source_fingerprint()
    if name:
        cache_root = cache_root / name
    logger.debug(
        "Creating persistent JIT cache at %s", cache_root
    )
    return JITPersistentCache(cache_root)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/cache_utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/flash_attn_triton_amd/interface_v2.py | import torch
import os
from typing import Literal, Optional, Union
from .fwd_prefill import attention_forward_prefill_triton_impl
from .fwd_decode import attention_forward_decode_triton_impl
from .bwd import attention_backward_triton_impl
from .utils import (
DEBUG,
USE_EXP2,
BWD_MODE,
PHILOX_SEED,
PHILOX_OFFSET,
SHAPE_EXPECTATIONS,
round_multiple,
)
def fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: Optional[torch.Tensor],
    alibi_slopes: Optional[torch.Tensor],
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    return_softmax: bool,
    gen_: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], torch.Tensor]:
    """Fixed-length forward attention (FA2 AMD Triton shim).

    q/k/v use "bshd" layout (batch, seqlen, nheads, headdim) per the layout
    literal below. Returns (out, softmax_lse, sd_mask, rng_state); sd_mask is
    populated only when dropout is active or return_softmax is set. `gen_` is
    accepted for interface parity and unused. Raises NotImplementedError for
    FP8 inputs and nonzero softcap.
    """
    # Reject FP8 tensors (FA2 AMD path does not support FP8)
    if str(q.dtype).startswith("torch.float8"):
        raise NotImplementedError(
            "FP8 tensors are not supported in the AMD Triton FA2 interface. Use the FA3 path instead."
        )
    # Unsupported features assertions (keep behavior explicit like v3 shim)
    if softcap != 0.0:
        raise NotImplementedError(
            "softcap is not supported in the AMD Triton FA2 interface (expected 0.0)."
        )
    if DEBUG:
        print()
        print("flash_attn_triton_amd.py::fwd inputs")
        print("q:", q.shape)
        print("k:", k.shape)
        print("v:", v.shape)
        print("out:", out.shape if out is not None else None)
        print("alibi_slopes:", alibi_slopes.shape if alibi_slopes is not None else None)
        print("dropout_p:", dropout_p)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("softcap:", softcap)
        print("return_softmax:", return_softmax)
    if out is None:
        out = torch.zeros_like(q)
    else:
        out.zero_()
    # Layout / shapes
    layout: Literal["bshd", "bhsd", "thd"] = "bshd"
    max_seqlen_q = q.shape[1]
    max_seqlen_k = k.shape[1]
    batch, _, nheads_q, _ = q.shape
    # Normalize / validate alibi
    if alibi_slopes is not None:
        # Broadcast per-head slopes (nheads,) to (batch, nheads).
        if alibi_slopes.dim() == 1:
            alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1)
        assert alibi_slopes.is_cuda and alibi_slopes.dim() == 2
        assert alibi_slopes.shape == (batch, nheads_q)
    # Dropout + RNG seed
    # NOTE(review): seed/offset come from fixed module constants, so the
    # dropout pattern repeats across calls — confirm this is intended.
    philox_seed, philox_offset = PHILOX_SEED, PHILOX_OFFSET
    rng_state = torch.as_tensor([philox_seed, philox_offset])
    # argument checks
    assert q.dim() == 4 and k.dim() == 4 and v.dim() == 4
    assert q.shape[-1] == k.shape[-1] == v.shape[-1]
    assert q.dtype == k.dtype == v.dtype
    assert out.shape[:-1] == q.shape[:-1] and out.shape[-1] == v.shape[-1]
    nheads_k = k.shape[2]
    # GQA/MQA: query heads must be a multiple of key/value heads.
    assert (nheads_q % nheads_k) == 0
    # Create output tensors based on shape expectations
    if SHAPE_EXPECTATIONS == "rounded":
        # "rounded" mode pads seqlen dims up to multiples of 128.
        softmax_lse = torch.zeros(
            (batch, nheads_q, round_multiple(max_seqlen_q, 128)),
            device=q.device,
            dtype=torch.float32,
        )
        if dropout_p > 0.0 or return_softmax:
            sd_mask = torch.zeros(
                (
                    batch,
                    nheads_q,
                    round_multiple(max_seqlen_q, 128),
                    round_multiple(max_seqlen_k, 128),
                ),
                device=q.device,
                dtype=torch.float32,
            )
        else:
            sd_mask = None
    else:
        softmax_lse = torch.zeros(
            (batch, nheads_q, max_seqlen_q),
            device=q.device,
            dtype=torch.float32,
        )
        if dropout_p > 0.0 or return_softmax:
            sd_mask = torch.zeros(
                (batch, nheads_q, max_seqlen_q, max_seqlen_k),
                device=q.device,
                dtype=torch.float32,
            )
        else:
            sd_mask = None
    # call implementation
    if DEBUG:
        print("Using Triton implementation")
    # Positional call into the Triton kernel wrapper; the Nones presumably
    # cover cu_seqlens/seqused and other varlen-only extras — keep the order
    # exactly aligned with attention_forward_prefill_triton_impl's signature.
    attention_forward_prefill_triton_impl(
        q,
        k,
        v,
        out,
        softmax_lse,
        sd_mask,
        softmax_scale,
        alibi_slopes,
        causal,
        window_size_left,
        window_size_right,
        None,
        layout,
        None,
        None,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        philox_seed,
        philox_offset,
        return_softmax,
        USE_EXP2,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    )
    if DEBUG:
        print("flash_attn_triton_amd.py::fwd outputs")
        print("out:", out.shape)
        print("softmax_lse:", softmax_lse.shape)
        print("sd_mask:", sd_mask.shape if sd_mask is not None else None)
        print("rng_state:", rng_state)
    # --- Assertions (shape + dtype contracts) ---
    # out: (B, Sq, Hq, D)
    assert out.shape == q.shape, f"[fwd] out shape {out.shape} != q shape {q.shape}"
    # softmax_lse dtype
    assert (
        softmax_lse.dtype == torch.float32
    ), f"[fwd] softmax_lse dtype {softmax_lse.dtype} != torch.float32"
    # softmax_lse shape depends on SHAPE_EXPECTATIONS
    if SHAPE_EXPECTATIONS == "rounded":
        expected_lse_shape = (q.shape[0], q.shape[2], round_multiple(q.shape[1], 128))
    else:
        expected_lse_shape = (q.shape[0], q.shape[2], q.shape[1])
    assert (
        softmax_lse.shape == expected_lse_shape
    ), f"[fwd] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}"
    if return_softmax:
        # sd_mask: (B, Hq, Sq, Sk)
        assert sd_mask is not None, "[fwd] return_softmax=True but sd_mask is None"
        assert sd_mask.dim() == 4, f"[fwd] sd_mask dim {sd_mask.dim()} != 4"
        if SHAPE_EXPECTATIONS == "rounded":
            expected_sq = round_multiple(q.shape[1], 128)
            expected_sk = round_multiple(k.shape[1], 128)
            assert (
                sd_mask.shape[0] == q.shape[0]
                and sd_mask.shape[1] == q.shape[2]
                and sd_mask.shape[2] == expected_sq
                and sd_mask.shape[3] == expected_sk
            ), f"[fwd] sd_mask shape {sd_mask.shape} != (B={q.shape[0]}, Hq={q.shape[2]}, Sq={expected_sq}, Sk={expected_sk})"
        else:
            assert (
                sd_mask.shape[0] == q.shape[0]
                and sd_mask.shape[1] == q.shape[2]
                and sd_mask.shape[2] == q.shape[1]
            ), f"[fwd] sd_mask leading dims {sd_mask.shape[:3]} mismatch (B,Hq,Sq) {(q.shape[0], q.shape[2], q.shape[1])}"
    else:
        assert sd_mask is None, "[fwd] return_softmax=False but sd_mask is not None"
    return out, softmax_lse, sd_mask, rng_state
def bwd(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    alibi_slopes: Optional[torch.Tensor],
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    deterministic: bool,
    gen_: Optional[torch.Tensor] = None,
    rng_state: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Fixed-length backward attention (FA2 AMD Triton shim).

    Computes dq/dk/dv (allocated or zeroed in place) plus the softmax delta.
    Raises NotImplementedError for nonzero softcap and for sliding-window
    attention, which this backward path does not support yet. `gen_` and
    `deterministic` are accepted for interface parity.
    """
    if softcap != 0.0:
        raise NotImplementedError(
            "softcap is not supported in the AMD Triton FA2 interface (expected 0.0)."
        )
    # Check for sliding window - backward doesn't support it yet
    is_sliding_window = (window_size_left >= 0) or (window_size_right >= 0)
    if is_sliding_window:
        raise NotImplementedError(
            f"Sliding window attention is not yet supported in the AMD Triton backward pass "
            f"(window_size_left={window_size_left}, window_size_right={window_size_right}). "
            f"Use window_size=(-1, -1) for full attention."
        )
    if DEBUG:
        print()
        print("flash_attn_triton_amd.py::bwd inputs")
        print("dout:", dout.shape)
        print("q:", q.shape)
        print("k:", k.shape)
        print("v:", v.shape)
        print("out:", out.shape)
        print("softmax_lse:", softmax_lse.shape)
        print("dq:", dq.shape if dq is not None else None)
        print("dk:", dk.shape if dk is not None else None)
        print("dv:", dv.shape if dv is not None else None)
        print("alibi_slopes:", alibi_slopes.shape if alibi_slopes is not None else None)
        print("dropout_p:", dropout_p)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("deterministic:", deterministic)
        print("rng_state:", rng_state)
    # Allocate fresh gradients or zero caller-provided buffers in place.
    dq = torch.zeros_like(q) if dq is None else dq.zero_()
    dk = torch.zeros_like(k) if dk is None else dk.zero_()
    dv = torch.zeros_like(v) if dv is None else dv.zero_()
    # get shape
    batch, seqlen_q, nheads_q, _ = q.shape
    # Create delta tensor with shape based on expectations
    # delta (softmax_d) : (B, Hq, Sq) or (B, Hq, round_multiple(Sq, 128))
    if SHAPE_EXPECTATIONS == "rounded":
        delta = torch.zeros(
            (batch, nheads_q, round_multiple(seqlen_q, 128)),
            device=q.device,
            dtype=torch.float32,
        )
    else:
        delta = torch.zeros(
            (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32
        )
    # Upstream change: base seeding logic on provided rng_state instead of dropout probability.
    if rng_state is not None:
        philox_seed, philox_offset = rng_state[0].item(), rng_state[1].item()
    else:
        philox_seed, philox_offset = None, None
    if alibi_slopes is not None:
        if alibi_slopes.dim() == 2:
            pass
        elif alibi_slopes.dim() == 1:
            # Broadcast per-head slopes (nheads,) to (batch, nheads).
            alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1)
        else:
            raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).")
    # call implementation
    if DEBUG:
        print(f"Using Triton implementation in {BWD_MODE} mode")
    attention_backward_triton_impl(
        do=dout,
        q=q,
        k=k,
        v=v,
        o=out,
        softmax_lse=softmax_lse,
        dq=dq,
        dk=dk,
        dv=dv,
        delta=delta,
        sm_scale=softmax_scale,
        alibi_slopes=alibi_slopes,
        causal=causal,
        layout="bshd",
        cu_seqlens_q=None,
        cu_seqlens_k=None,
        max_seqlen_q=seqlen_q,
        max_seqlen_k=k.shape[1],
        seqused_q=None,
        seqused_k=None,
        dropout_p=dropout_p,
        philox_seed=philox_seed,
        philox_offset=philox_offset,
        use_exp2=USE_EXP2,
        mode=BWD_MODE,
    )
    if DEBUG:
        print("flash_attn_triton_amd.py::bwd outputs")
        print("dq:", dq.shape)
        print("dk:", dk.shape)
        print("dv:", dv.shape)
    # --- Assertions ---
    assert dq.shape == q.shape, f"[bwd] dq shape {dq.shape} != q shape {q.shape}"
    assert dk.shape == k.shape, f"[bwd] dk shape {dk.shape} != k shape {k.shape}"
    assert dv.shape == v.shape, f"[bwd] dv shape {dv.shape} != v shape {v.shape}"
    # delta (softmax_d) : (B, Hq, Sq)
    if SHAPE_EXPECTATIONS == "rounded":
        expected_delta_shape = (q.shape[0], q.shape[2], round_multiple(q.shape[1], 128))
    else:
        expected_delta_shape = (q.shape[0], q.shape[2], q.shape[1])
    assert (
        delta.shape == expected_delta_shape
    ), f"[bwd] delta shape {delta.shape} != {expected_delta_shape}"
    return dq, dk, dv, delta
def varlen_fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: Optional[torch.Tensor],
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    seqused_k: Optional[torch.Tensor],
    leftpad_k: Optional[torch.Tensor],
    block_table_: Optional[torch.Tensor],
    alibi_slopes: Optional[torch.Tensor],
    max_seqlen_q: int,
    max_seqlen_k: int,
    dropout_p: float,
    softmax_scale: float,
    zero_tensors: bool,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    return_softmax: bool,
    gen_: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], torch.Tensor]:
    """Variable-length forward attention (FA2 AMD Triton shim).

    q/k/v use "thd" layout (total_tokens, nheads, headdim) with sequence
    boundaries given by cu_seqlens_q/cu_seqlens_k. Returns (out, softmax_lse,
    sd_mask, rng_state). Raises NotImplementedError for FP8 inputs, nonzero
    softcap, leftpad_k, block tables, and seqused_k. `zero_tensors` and
    `gen_` are accepted for interface parity.
    """
    if str(q.dtype).startswith("torch.float8"):
        raise NotImplementedError(
            "FP8 tensors are not supported in the AMD Triton FA2 interface (varlen_fwd). Use the FA3 path instead."
        )
    if softcap != 0.0:
        raise NotImplementedError(
            "softcap is not supported in varlen_fwd (expected 0.0)."
        )
    if leftpad_k is not None:
        raise NotImplementedError(
            "leftpad_k is not supported in AMD Triton FA2 varlen_fwd."
        )
    if block_table_ is not None:
        raise NotImplementedError(
            "block_table / paged attention is not supported in AMD Triton FA2 varlen_fwd."
        )
    if seqused_k is not None:
        raise NotImplementedError(
            "seqused_k is not supported in AMD Triton FA2 varlen_fwd."
        )
    if DEBUG:
        print()
        print("flash_attn_triton_amd.py::varlen_fwd")
        print("q:", q.shape)
        print("k:", k.shape)
        print("v:", v.shape)
        print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape)
        print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape)
        print("alibi_slopes:", alibi_slopes)
        print("max_seqlen_q:", max_seqlen_q)
        print("max_seqlen_k:", max_seqlen_k)
        print("dropout_p:", dropout_p)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("gen_:", gen_)
    out = torch.zeros_like(q) if out is None else out.zero_()
    # Layout and basic info for varlen
    layout: Literal["bshd", "bhsd", "thd"] = "thd"
    batch = len(cu_seqlens_q) - 1
    total_q, nheads_q, _ = q.shape
    # Create softmax_lse tensor - varlen always uses exact shape (Hq, Total_Q)
    softmax_lse = torch.zeros((nheads_q, total_q), device=q.device, dtype=torch.float32)
    # Create sd_mask tensor if needed
    if return_softmax:
        # sd_mask: (B, Hq, Sq, Sk) - shape based on expectations
        # NOTE(review): sd_mask uses dtype=q.dtype here but torch.float32 in
        # the fixed-length fwd() above — confirm which is intended.
        if SHAPE_EXPECTATIONS == "rounded":
            sd_mask = torch.zeros(
                (
                    batch,
                    nheads_q,
                    round_multiple(max_seqlen_q, 128),
                    round_multiple(max_seqlen_k, 128),
                ),
                device=q.device,
                dtype=q.dtype,
            )
        else:
            sd_mask = torch.zeros(
                (batch, nheads_q, max_seqlen_q, max_seqlen_k),
                device=q.device,
                dtype=q.dtype,
            )
    else:
        sd_mask = None
    if alibi_slopes is not None:
        # Broadcast per-head slopes (nheads,) to (batch, nheads).
        if alibi_slopes.dim() == 1:
            alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1)
        assert alibi_slopes.is_cuda and alibi_slopes.dim() == 2
        assert alibi_slopes.shape == (batch, nheads_q)
    # NOTE(review): fixed module constants, so dropout patterns repeat across
    # calls — confirm intended.
    philox_seed, philox_offset = PHILOX_SEED, PHILOX_OFFSET
    rng_state = torch.as_tensor([philox_seed, philox_offset])
    # Inline checks (subset appropriate for varlen)
    assert q.dim() == 3 and k.dim() == 3 and v.dim() == 3
    assert q.shape[-1] == k.shape[-1] == v.shape[-1]
    assert q.dtype == k.dtype == v.dtype
    assert out.shape == q.shape
    nheads_k = k.shape[1]
    # GQA/MQA: query heads must be a multiple of key/value heads.
    assert (nheads_q % nheads_k) == 0
    # call implementation
    if DEBUG:
        print("Using Triton implementation")
    # Positional call: argument order must match
    # attention_forward_prefill_triton_impl exactly (trailing Nones cover
    # optional extras not used by the varlen path).
    attention_forward_prefill_triton_impl(
        q,
        k,
        v,
        out,
        softmax_lse,
        sd_mask,
        softmax_scale,
        alibi_slopes,
        causal,
        window_size_left,
        window_size_right,
        None,
        layout,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        philox_seed,
        philox_offset,
        return_softmax,
        USE_EXP2,
        None,
        None,
        None,
    )
    if DEBUG:
        print("varlen_fwd outputs")
        print("out:", out, out.shape)
        print("softmax_lse:", softmax_lse, softmax_lse.shape)
        print("sd_mask:", sd_mask, sd_mask.shape if sd_mask is not None else None)
    # --- Assertions ---
    # out: (Total_Q, Hq, D)
    assert (
        out.shape == q.shape
    ), f"[varlen_fwd] out shape {out.shape} != q shape {q.shape}"
    # softmax_lse: (Hq, Total_Q)
    expected_lse_shape = (q.shape[1], q.shape[0])
    assert (
        softmax_lse.shape == expected_lse_shape
    ), f"[varlen_fwd] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}"
    assert (
        softmax_lse.dtype == torch.float32
    ), f"[varlen_fwd] softmax_lse dtype {softmax_lse.dtype} != torch.float32"
    if return_softmax:
        # sd_mask expected: (B, Hq, max_seqlen_q, max_seqlen_k)
        assert (
            sd_mask is not None
        ), "[varlen_fwd] return_softmax=True but sd_mask is None"
        assert sd_mask.dim() == 4, f"[varlen_fwd] sd_mask dim {sd_mask.dim()} != 4"
        batch = len(cu_seqlens_q) - 1
        assert (
            sd_mask.shape[0] == batch
        ), f"[varlen_fwd] sd_mask batch {sd_mask.shape[0]} != {batch}"
        assert (
            sd_mask.shape[1] == q.shape[1]
        ), f"[varlen_fwd] sd_mask nheads {sd_mask.shape[1]} != {q.shape[1]}"
        if SHAPE_EXPECTATIONS == "rounded":
            expected_sq = round_multiple(max_seqlen_q, 128)
            expected_sk = round_multiple(max_seqlen_k, 128)
            assert (
                sd_mask.shape[2] == expected_sq and sd_mask.shape[3] == expected_sk
            ), f"[varlen_fwd] sd_mask shape {sd_mask.shape} != (B={batch}, Hq={q.shape[1]}, Sq={expected_sq}, Sk={expected_sk})"
        else:
            assert (
                sd_mask.shape[2] == max_seqlen_q and sd_mask.shape[3] == max_seqlen_k
            ), f"[varlen_fwd] sd_mask shape {sd_mask.shape} != (B={batch}, Hq={q.shape[1]}, Sq={max_seqlen_q}, Sk={max_seqlen_k})"
    else:
        assert (
            sd_mask is None
        ), "[varlen_fwd] return_softmax=False but sd_mask is not None"
    return out, softmax_lse, sd_mask, rng_state
def varlen_bwd(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    alibi_slopes: Optional[torch.Tensor],
    max_seqlen_q: int,
    max_seqlen_k: int,
    dropout_p: float,
    softmax_scale: float,
    zero_tensors: bool,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    deterministic: bool,
    gen_: Optional[torch.Tensor] = None,
    rng_state: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Variable-length backward attention (FA2 AMD Triton shim).

    q/k/v use "thd" layout with boundaries from cu_seqlens_q/cu_seqlens_k.
    Computes dq/dk/dv (allocated or zeroed in place) plus the softmax delta.
    Raises NotImplementedError for FP8 inputs and nonzero softcap.
    `zero_tensors`, `deterministic`, and `gen_` are accepted for parity.
    """
    if str(q.dtype).startswith("torch.float8"):
        raise NotImplementedError(
            "FP8 tensors are not supported in the AMD Triton FA2 interface (varlen_bwd). Use the FA3 path instead."
        )
    if softcap != 0.0:
        raise NotImplementedError(
            "softcap is not supported in varlen_bwd (expected 0.0)."
        )
    if DEBUG:
        print()
        print("varlen_bwd")
        print("dout:", dout.shape)
        print("q:", q.shape)
        print("k:", k.shape)
        print("v:", v.shape)
        print("out:", out)
        print("softmax_lse:", softmax_lse.shape)
        print("dq:", dq.shape if dq is not None else None)
        print("dk:", dk.shape if dk is not None else None)
        print("dv:", dv.shape if dv is not None else None)
        print("cu_seqlens_q:", cu_seqlens_q, cu_seqlens_q.shape)
        print("cu_seqlens_k:", cu_seqlens_k, cu_seqlens_k.shape)
        print("alibi_slopes:", alibi_slopes)
        print("max_seqlen_q:", max_seqlen_q)
        print("max_seqlen_k:", max_seqlen_k)
        print("dropout_p:", dropout_p)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("deterministic:", deterministic)
        print("gen_:", gen_)
        print("rng_state:", rng_state)
    # Allocate fresh gradients or zero caller-provided buffers in place.
    dq = torch.zeros_like(q) if dq is None else dq.zero_()
    dk = torch.zeros_like(k) if dk is None else dk.zero_()
    dv = torch.zeros_like(v) if dv is None else dv.zero_()
    # get shape
    batch = len(cu_seqlens_q) - 1
    total_q, nheads_q, _ = q.shape
    # Create delta tensor with shape based on expectations
    # delta (softmax_d) : (Hq, Total_Q) or (Hq, Total_Q + 128*batch)
    if SHAPE_EXPECTATIONS == "rounded":
        delta = torch.zeros(
            (nheads_q, total_q + 128 * batch), device=q.device, dtype=torch.float32
        )
    else:
        delta = torch.zeros((nheads_q, total_q), device=q.device, dtype=torch.float32)
    # Upstream change: base seeding logic on provided rng_state instead of dropout probability.
    if rng_state is not None:
        philox_seed, philox_offset = rng_state[0].item(), rng_state[1].item()
    else:
        philox_seed, philox_offset = None, None
    if alibi_slopes is not None:
        if alibi_slopes.dim() == 2:
            pass
        elif alibi_slopes.dim() == 1:
            # Broadcast per-head slopes (nheads,) to (batch, nheads).
            alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1)
        else:
            raise ValueError("Alibi can be (nheads,) or (batch_size, nheads).")
    # call implementation
    if DEBUG:
        print(f"Using Triton implementation in {BWD_MODE} mode")
    attention_backward_triton_impl(
        do=dout,
        q=q,
        k=k,
        v=v,
        o=out,
        softmax_lse=softmax_lse,
        dq=dq,
        dk=dk,
        dv=dv,
        delta=delta,
        sm_scale=softmax_scale,
        alibi_slopes=alibi_slopes,
        causal=causal,
        layout="thd",
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k,
        max_seqlen_q=max_seqlen_q,
        max_seqlen_k=max_seqlen_k,
        seqused_q=None,
        seqused_k=None,
        dropout_p=dropout_p,
        philox_seed=philox_seed,
        philox_offset=philox_offset,
        use_exp2=USE_EXP2,
        mode=BWD_MODE,
    )
    if DEBUG:
        print("varlen_bwd outputs")
        print("delta:", delta, delta.shape)
        print("dv:", dv, dv.shape)
        print("dk:", dk, dk.shape)
        print("dq:", dq, dq.shape)
    # --- Assertions ---
    assert dq.shape == q.shape, f"[varlen_bwd] dq shape {dq.shape} != q shape {q.shape}"
    assert dk.shape == k.shape, f"[varlen_bwd] dk shape {dk.shape} != k shape {k.shape}"
    assert dv.shape == v.shape, f"[varlen_bwd] dv shape {dv.shape} != v shape {v.shape}"
    if SHAPE_EXPECTATIONS == "rounded":
        batch = len(cu_seqlens_q) - 1
        expected_delta_shape = (q.shape[1], q.shape[0] + 128 * batch)
    else:
        expected_delta_shape = (q.shape[1], q.shape[0])  # (Hq, Total_Q)
    assert (
        delta.shape == expected_delta_shape
    ), f"[varlen_bwd] delta shape {delta.shape} != {expected_delta_shape}"
    return dq, dk, dv, delta
def fwd_kvcache(
    q: torch.Tensor,
    k_cache: torch.Tensor,
    v_cache: torch.Tensor,
    k: Optional[torch.Tensor],
    v: Optional[torch.Tensor],
    cache_seqlens: Optional[Union[int, torch.Tensor]],
    rotary_cos: Optional[torch.Tensor],
    rotary_sin: Optional[torch.Tensor],
    cache_batch_idx: Optional[torch.Tensor],
    cache_leftpad: Optional[torch.Tensor],
    block_table: Optional[torch.Tensor],
    alibi_slopes: Optional[torch.Tensor],
    out: Optional[torch.Tensor],
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    rotary_interleaved: bool,
    num_splits: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Decode-style attention forward pass against a KV cache.

    Normalizes the FA2 kvcache arguments and dispatches to the AMD Triton
    decode kernel ("bshd" layout only).

    Returns:
        (out, softmax_lse): attention output with q's shape, and the
        per-query log-sum-exp as float32 of shape (B, Hq, Sq).

    Raises:
        NotImplementedError: if softcap != 0.0 or num_splits > 1.
    """
    # Reject features this backend does not implement before doing any work.
    if softcap != 0.0:
        raise NotImplementedError(
            "softcap is not supported in fwd_kvcache (expected 0.0)."
        )
    if num_splits not in (0, 1):
        raise NotImplementedError(
            "num_splits > 1 not supported in AMD Triton FA2 fwd_kvcache."
        )
    if DEBUG:
        print()
        print("flash_attn_triton_amd.py::fwd_kvcache inputs")
        print("q:", q, q.shape)
        print("k_cache:", k_cache, k_cache.shape)
        print("v_cache:", v_cache, v_cache.shape)
        print("k:", k, k.shape if k is not None else None)
        print("v:", v, v.shape if v is not None else None)
        print("cache_seqlens:", cache_seqlens)
        print("rotary_cos:", rotary_cos)
        print("rotary_sin:", rotary_sin)
        print("cache_batch_idx:", cache_batch_idx)
        print("cache_leftpad:", cache_leftpad)
        print("block_table:", block_table)
        print("alibi_slopes:", alibi_slopes)
        print("out:", out)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("softcap:", softcap)
        print("rotary_interleaved:", rotary_interleaved)
        print("num_splits:", num_splits)
    # Allocate a fresh output buffer, or reset the caller-provided one.
    out = torch.zeros_like(q) if out is None else out.zero_()
    # The decode path always runs on the batched (B, S, H, D) layout.
    layout: Literal["bshd"] = "bshd"
    max_seqlen_q = q.shape[1]
    max_seqlen_k = k_cache.shape[1]
    # Normalize scalar-or-tensor arguments to the forms the kernel expects.
    if isinstance(cache_seqlens, int):
        seqlens_k = torch.tensor(cache_seqlens, device=q.device)
    else:
        seqlens_k = cache_seqlens
    win_left = window_size_left
    if isinstance(win_left, torch.Tensor):
        win_left = int(win_left.item())
    win_right = window_size_right
    if isinstance(win_right, torch.Tensor):
        win_right = int(win_right.item())
    # k/v here are the *new* tokens to append to the cache (may be None).
    k_new, v_new = k, v
    batch, seqlen_q, nheads_q, _ = q.shape
    # Decode always uses the exact (B, Hq, Sq) shape for the LSE buffer.
    softmax_lse = torch.zeros(
        (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32
    )
    if alibi_slopes is not None:
        # Broadcast per-head slopes across the batch when given as (nheads,).
        if alibi_slopes.dim() == 1:
            alibi_slopes = alibi_slopes.unsqueeze(0).expand(batch, -1)
        assert alibi_slopes.is_cuda and alibi_slopes.dim() == 2
        assert alibi_slopes.shape == (batch, nheads_q)
    if DEBUG:
        print("Using Triton implementation")
    # Kernel call; trailing three Nones are the q/k/v descale slots (fp8 only).
    attention_forward_decode_triton_impl(
        q,
        k_cache,
        v_cache,
        k_new,
        v_new,
        out,
        softmax_lse,
        softmax_scale,
        causal,
        win_left,
        win_right,
        alibi_slopes,
        layout,
        seqlens_k,
        cache_batch_idx,
        block_table,
        None,
        None,
        None,
        rotary_cos=rotary_cos,
        rotary_sin=rotary_sin,
        rotary_interleaved=rotary_interleaved,
    )
    if DEBUG:
        print("out:", out, out.shape)
        print("softmax_lse:", softmax_lse, softmax_lse.shape)
    # Sanity-check output shapes and dtypes before returning.
    assert (
        out.shape == q.shape
    ), f"[fwd_kvcache] out shape {out.shape} != q shape {q.shape}"
    expected_lse_shape = (q.shape[0], q.shape[2], q.shape[1])
    assert (
        softmax_lse.shape == expected_lse_shape
    ), f"[fwd_kvcache] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}"
    assert (
        softmax_lse.dtype == torch.float32
    ), f"[fwd_kvcache] softmax_lse dtype {softmax_lse.dtype} != torch.float32"
    return out, softmax_lse
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/flash_attn_triton_amd/interface_v2.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 763,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/flash_attn_triton_amd/interface_v3.py | import os
import warnings
import torch
from typing import Literal, Optional, Union, Tuple
from .fwd_prefill import attention_forward_prefill_triton_impl
from .fwd_decode import attention_forward_decode_triton_impl
from .bwd import attention_backward_triton_impl
from .utils import (
DEBUG,
USE_EXP2,
BWD_MODE,
PHILOX_SEED,
PHILOX_OFFSET,
is_fp8,
)
def fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    k_new: Optional[torch.Tensor],
    v_new: Optional[torch.Tensor],
    qv: Optional[torch.Tensor],
    out: Optional[torch.Tensor],
    cu_seqlens_q: Optional[torch.Tensor],
    cu_seqlens_k: Optional[torch.Tensor],
    cu_seqlens_k_new: Optional[torch.Tensor],
    seqused_q: Optional[torch.Tensor],
    seqused_k: Optional[torch.Tensor],
    max_seqlen_q: Optional[int],
    max_seqlen_k: Optional[int],
    page_table: Optional[torch.Tensor],
    kv_batch_idx: Optional[torch.Tensor],
    leftpad_k: Optional[torch.Tensor],
    rotary_cos: Optional[torch.Tensor],
    rotary_sin: Optional[torch.Tensor],
    seqlens_rotary: Optional[torch.Tensor],
    q_descale: Optional[torch.Tensor],
    k_descale: Optional[torch.Tensor],
    v_descale: Optional[torch.Tensor],
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    attention_chunk: int,
    softcap: float,
    rotary_interleaved: bool,
    scheduler_metadata: None = None,
    num_splits: int = 1,
    pack_gqa: Optional[bool] = None,
    sm_margin: int = 0,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
    """
    Flash Attention v3 forward pass compatible interface for AMD Triton implementation.
    This function maps v3 parameters to the existing AMD Triton implementation.

    Layout is "thd" (varlen) when cu_seqlens_q is given, otherwise "bshd".
    Dispatches to the decode kernel when KV-cache indicators are present
    (k_new/v_new, kv_batch_idx, or seqused_k without seqused_q), otherwise
    to the prefill kernel.

    Returns:
        (out, softmax_lse, None, None) -- the last two slots mirror the CUDA
        FA3 out_accum / softmax_lse_accum outputs; always None here because
        split-k accumulation is not implemented in this backend.

    Raises:
        NotImplementedError: for features this backend does not support
            (qv packing, softcap != 0, attention_chunk, scheduler metadata,
            pack_gqa, num_splits > 1, sm_margin != 0, leftpad_k,
            cu_seqlens_k_new; plus varlen / kv_batch_idx on the decode path).
    """
    if DEBUG:
        print()
        print("interface_fa_v3.py::fwd inputs")
        print("q:", q.shape)
        print("k:", k.shape)
        print("v:", v.shape)
        print("k_new:", k_new.shape if k_new is not None else None)
        print("v_new:", v_new.shape if v_new is not None else None)
        print("qv:", qv.shape if qv is not None else None)
        print("out:", out.shape if out is not None else None)
        print("cu_seqlens_q:", cu_seqlens_q.shape if cu_seqlens_q is not None else None)
        print("cu_seqlens_k:", cu_seqlens_k.shape if cu_seqlens_k is not None else None)
        print("cu_seqlens_k_new:", cu_seqlens_k_new.shape if cu_seqlens_k_new is not None else None)
        print("seqused_q:", seqused_q.shape if seqused_q is not None else None)
        print("seqused_k:", seqused_k.shape if seqused_k is not None else None)
        print("max_seqlen_q:", max_seqlen_q)
        print("max_seqlen_k:", max_seqlen_k)
        print("page_table:", page_table.shape if page_table is not None else None)
        print("kv_batch_idx:", kv_batch_idx.shape if kv_batch_idx is not None else None)
        print("leftpad_k:", leftpad_k.shape if leftpad_k is not None else None)
        print("rotary_cos:", rotary_cos.shape if rotary_cos is not None else None)
        print("rotary_sin:", rotary_sin.shape if rotary_sin is not None else None)
        print("seqlens_rotary:", seqlens_rotary.shape if seqlens_rotary is not None else None)
        print("q_descale:", q_descale.shape if q_descale is not None else None)
        print("k_descale:", k_descale.shape if k_descale is not None else None)
        print("v_descale:", v_descale.shape if v_descale is not None else None)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("attention_chunk:", attention_chunk)
        print("softcap:", softcap)
        print("rotary_interleaved:", rotary_interleaved)
        print("scheduler_metadata:", scheduler_metadata)
        print("num_splits:", num_splits)
        print("pack_gqa:", pack_gqa)
        print("sm_margin:", sm_margin)
    # Handle qv packed input
    if qv is not None:
        raise NotImplementedError(
            "QV packed input is not yet supported in the AMD Triton backend"
        )
    # Handle softcap
    if softcap != 0.0:
        raise NotImplementedError(
            f"Softcap is not yet supported in the AMD Triton backend (got softcap={softcap}, expected 0.0)"
        )
    # Handle attention_chunk
    if attention_chunk != 0 and attention_chunk != 1:
        raise NotImplementedError(
            f"attention_chunk is not yet supported in the AMD Triton backend (got attention_chunk={attention_chunk})"
        )
    # Handle scheduler metadata
    if scheduler_metadata is not None:
        raise NotImplementedError(
            "Scheduler metadata is not yet supported in the AMD Triton backend"
        )
    # Handle pack_gqa
    if pack_gqa is not None and pack_gqa is not False:
        raise NotImplementedError(
            f"pack_gqa is not yet supported in the AMD Triton backend (got pack_gqa={pack_gqa})"
        )
    # Handle num_splits
    if num_splits != 1:
        raise NotImplementedError(
            f"Split attention (num_splits > 1) is not yet supported in the AMD Triton backend (got num_splits={num_splits})"
        )
    # Handle sm_margin
    if sm_margin != 0:
        raise NotImplementedError(
            f"sm_margin is not yet supported in the AMD Triton backend (got sm_margin={sm_margin}, expected 0)"
        )
    # Handle leftpad_k
    if leftpad_k is not None:
        raise NotImplementedError(
            "Left padding (leftpad_k) is not yet supported in the AMD Triton backend"
        )
    # Handle cu_seqlens_k_new
    if cu_seqlens_k_new is not None:
        raise NotImplementedError(
            "cu_seqlens_k_new is not yet supported in the AMD Triton backend"
        )
    # establish layout / varlen & max seq lens
    if cu_seqlens_q is not None:
        if len(q.shape) != 3:
            raise ValueError(
                f"cu_seqlens_q provided but q has shape {q.shape}, expected 3D tensor for varlen"
            )
        layout: Literal["bshd", "thd"] = "thd"
        cu_seqlens_q_local = cu_seqlens_q
        assert max_seqlen_q is not None, "max_seqlen_q required for varlen mode"
        max_seqlens_q_local = max_seqlen_q
        if cu_seqlens_k is not None:
            cu_seqlens_k_local = cu_seqlens_k
            assert max_seqlen_k is not None, "max_seqlen_k required when cu_seqlens_k provided"
            max_seqlens_k_local = max_seqlen_k
        else:
            cu_seqlens_k_local = None
            # 4D k means a paged/batched KV despite varlen q: take S from its shape.
            if len(k.shape) == 4:
                max_seqlens_k_local = k.shape[1]
            else:
                assert max_seqlen_k is not None, "max_seqlen_k required for varlen mode"
                max_seqlens_k_local = max_seqlen_k
    else:
        layout = "bshd"
        cu_seqlens_q_local = None
        cu_seqlens_k_local = None
        max_seqlens_q_local = q.shape[1] if max_seqlen_q is None else max_seqlen_q
        max_seqlens_k_local = k.shape[1] if max_seqlen_k is None else max_seqlen_k
    # Now determine if we should use decode or prefill kernel
    # Decode kernel should be used for KV cache scenarios where:
    # 1. k_new/v_new are provided - incremental KV cache update (primary KV cache indicator)
    # 2. kv_batch_idx is provided - KV cache batch indexing (primary KV cache indicator)
    # 3. seqused_k without seqused_q - indicates KV cache fill levels (not varlen masking)
    # Note: In varlen, both seqused_q and seqused_k are used for sequence masking
    # In KV cache, only seqused_k is used to track cache fill levels
    # Detect KV cache scenarios:
    # - Clear KV cache indicators (k_new, v_new, kv_batch_idx)
    # - OR seqused_k without seqused_q (KV cache fill tracking, not varlen masking)
    use_decode = (
        k_new is not None  # Have new KV to append (KV cache indicator)
        or v_new is not None  # Have new KV to append (KV cache indicator)
        or kv_batch_idx is not None  # Have KV cache batch indexing (KV cache indicator)
        or (
            seqused_k is not None and seqused_q is None
        )  # KV cache fill levels (not varlen)
    )
    # Check for unsupported features with decode kernel
    if use_decode:
        if layout == "thd":
            raise NotImplementedError(
                "Varlen is not yet supported with the decode kernel in the AMD Triton backend"
            )
        if kv_batch_idx is not None:
            raise NotImplementedError(
                "kv_batch_idx is not yet supported with the decode kernel in the AMD Triton backend"
            )
    if out is None:
        # NOTE: Using types that are lower precision than float32 such as bfloat16 for fp8 causes mismatches on a small set of tests.
        out_dtype = torch.float32 if is_fp8([q, k, v]) else q.dtype
        # Output keeps q's leading dims but takes v's head dimension.
        if layout == "bshd":
            out = torch.zeros(
                q.shape[0],
                q.shape[1],
                q.shape[2],
                v.shape[-1],
                dtype=out_dtype,
                device=q.device,
            )
        elif layout == "thd":
            out = torch.zeros(
                q.shape[0], q.shape[1], v.shape[-1], dtype=out_dtype, device=q.device
            )
        else:
            raise ValueError(
                f"Unsupported layout: {layout}. Only 'bshd' and 'thd' layouts are supported."
            )
    else:
        out = out.zero_()
    # Handle causal mask
    causal_flag = bool(causal)
    # Handle alibi slopes
    alibi_slopes = None
    # Handle dropout
    dropout_p = 0.0
    return_softmax = False
    philox_seed = PHILOX_SEED
    philox_offset = PHILOX_OFFSET
    # Call implementation
    if DEBUG:
        print("Using Triton implementation")
    if use_decode:
        if DEBUG:
            print(
                f"Using Decode Triton implementation (cache_seqlens={seqused_k is not None}, k_new={k_new is not None}, v_new={v_new is not None}, kv_batch_idx={kv_batch_idx is not None})"
            )
        # Create softmax_lse tensor for decode - always exact shape (B, Hq, Sq)
        batch, seqlen_q, nheads_q, _ = q.shape
        softmax_lse = torch.zeros(
            (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32
        )
        # Decode only supports bshd layout
        assert layout == "bshd", f"decode requires bshd layout, got {layout}"
        attention_forward_decode_triton_impl(
            q,
            k,
            v,
            k_new,
            v_new,
            out,
            softmax_lse,
            softmax_scale,
            causal_flag,
            window_size_left,
            window_size_right,
            alibi_slopes,
            layout,
            seqused_k,
            kv_batch_idx,
            page_table,
            q_descale,
            k_descale,
            v_descale,
            rotary_cos=rotary_cos,
            rotary_sin=rotary_sin,
            rotary_interleaved=rotary_interleaved,
            seqlens_rotary=seqlens_rotary,
        )
    else:
        if DEBUG:
            print("Using Prefill Triton implementation")
        # Create softmax_lse tensor - FA3 always uses exact shapes
        if layout == "thd":
            # varlen: (Hq, Total_Q)
            total_q, nheads_q, _ = q.shape
            softmax_lse = torch.zeros(
                (nheads_q, total_q), device=q.device, dtype=torch.float32
            )
        else:
            # bshd: (B, Hq, Sq)
            batch, seqlen_q, nheads_q, _ = q.shape
            softmax_lse = torch.zeros(
                (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32
            )
        # sd_mask is not returned in v3 interface
        sd_mask = None
        attention_forward_prefill_triton_impl(
            q,
            k,
            v,
            out,
            softmax_lse,
            sd_mask,
            softmax_scale,
            alibi_slopes,
            causal_flag,
            window_size_left,
            window_size_right,
            None,
            layout,
            cu_seqlens_q_local,
            cu_seqlens_k_local,
            max_seqlens_q_local,
            max_seqlens_k_local,
            dropout_p,
            philox_seed,
            philox_offset,
            return_softmax,
            USE_EXP2,
            q_descale,
            k_descale,
            v_descale,
            seqused_q,
            seqused_k,
            rotary_cos=rotary_cos,
            rotary_sin=rotary_sin,
            rotary_interleaved=rotary_interleaved,
            seqlens_rotary=seqlens_rotary,
        )
    if DEBUG:
        print("interface_fa_v3.py::fwd outputs")
        print("out:", out.shape)
        print("softmax_lse:", softmax_lse.shape)
    # --- Assertions (FA3 always expects exact shapes) ---
    # out: same shape as q except last dim is v's head_dim
    if layout == "thd":
        # varlen: (Total_Q, Hq, Dv)
        assert (
            out.shape[0] == q.shape[0]
        ), f"[fwd_v3] out.shape[0] {out.shape[0]} != q.shape[0] {q.shape[0]}"
        assert (
            out.shape[1] == q.shape[1]
        ), f"[fwd_v3] out.shape[1] {out.shape[1]} != q.shape[1] {q.shape[1]}"
        assert (
            out.shape[2] == v.shape[-1]
        ), f"[fwd_v3] out.shape[2] {out.shape[2]} != v.shape[-1] {v.shape[-1]}"
    else:
        # bshd: (B, Sq, Hq, Dv)
        assert (
            out.shape[0] == q.shape[0]
        ), f"[fwd_v3] out.shape[0] {out.shape[0]} != q.shape[0] {q.shape[0]}"
        assert (
            out.shape[1] == q.shape[1]
        ), f"[fwd_v3] out.shape[1] {out.shape[1]} != q.shape[1] {q.shape[1]}"
        assert (
            out.shape[2] == q.shape[2]
        ), f"[fwd_v3] out.shape[2] {out.shape[2]} != q.shape[2] {q.shape[2]}"
        assert (
            out.shape[3] == v.shape[-1]
        ), f"[fwd_v3] out.shape[3] {out.shape[3]} != v.shape[-1] {v.shape[-1]}"
    # softmax_lse dtype
    assert (
        softmax_lse.dtype == torch.float32
    ), f"[fwd_v3] softmax_lse dtype {softmax_lse.dtype} != torch.float32"
    # softmax_lse shape depends on layout
    expected_lse_shape: tuple[int, ...]
    if layout == "thd":
        # varlen: (Hq, Total_Q)
        expected_lse_shape = (q.shape[1], q.shape[0])
    else:
        # bshd: (B, Hq, Sq)
        expected_lse_shape = (q.shape[0], q.shape[2], q.shape[1])
    assert (
        softmax_lse.shape == expected_lse_shape
    ), f"[fwd_v3] softmax_lse shape {softmax_lse.shape} != {expected_lse_shape}"
    # Return format compatible with v3
    # V3 returns (out, softmax_lse, out_accum, softmax_lse_accum)
    # out_accum and softmax_lse_accum are None for Triton AMD (no split-k accumulation)
    return out, softmax_lse, None, None
def bwd(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    cu_seqlens_q: Optional[torch.Tensor],
    cu_seqlens_k: Optional[torch.Tensor],
    seqused_q: Optional[torch.Tensor],
    seqused_k: Optional[torch.Tensor],
    max_seqlen_q: Optional[int],
    max_seqlen_k: Optional[int],
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    deterministic: bool,
    sm_margin: int = 0,
) -> Tuple[torch.Tensor]:
    """
    Flash Attention v3 backward pass compatible interface for AMD Triton implementation.
    This function maps v3 parameters to the existing AMD Triton implementation.

    Writes dq/dk/dv in place (allocating zeroed tensors when not provided)
    and returns only the softmax_d ("delta") tensor, matching the
    (softmax_d, *rest) return convention of the CUDA FA3 backward.

    Raises:
        NotImplementedError: for sliding-window attention
            (window_size_left/right >= 0), softcap != 0.0, or
            sm_margin != 0, which this backend's backward does not support.
    """
    if DEBUG:
        print()
        print("interface_fa_v3.py::bwd inputs")
        print("dout:", dout.shape)
        print("q:", q.shape)
        print("k:", k.shape)
        print("v:", v.shape)
        print("out:", out.shape)
        print("softmax_lse:", softmax_lse.shape)
        print("dq:", dq.shape if dq is not None else None)
        print("dk:", dk.shape if dk is not None else None)
        print("dv:", dv.shape if dv is not None else None)
        print("cu_seqlens_q:", cu_seqlens_q.shape if cu_seqlens_q is not None else None)
        print("cu_seqlens_k:", cu_seqlens_k.shape if cu_seqlens_k is not None else None)
        print("seqused_q:", seqused_q.shape if seqused_q is not None else None)
        print("seqused_k:", seqused_k.shape if seqused_k is not None else None)
        print("max_seqlen_q:", max_seqlen_q)
        print("max_seqlen_k:", max_seqlen_k)
        print("softmax_scale:", softmax_scale)
        print("causal:", causal)
        print("window_size_left:", window_size_left)
        print("window_size_right:", window_size_right)
        print("softcap:", softcap)
        print("deterministic:", deterministic)
        print("sm_margin:", sm_margin)
    # Check for unsupported features in backward pass
    # Handle sliding window - backward doesn't support it yet
    is_sliding_window = (window_size_left >= 0) or (window_size_right >= 0)
    if is_sliding_window:
        raise NotImplementedError(
            f"Sliding window attention is not yet supported in the AMD Triton backward pass "
            f"(window_size_left={window_size_left}, window_size_right={window_size_right}). "
            f"Use window_size=(-1, -1) for full attention."
        )
    # Handle softcap
    if softcap != 0.0:
        raise NotImplementedError(
            f"Softcap is not yet supported in the AMD Triton backend backward pass (got softcap={softcap}, expected 0.0)"
        )
    # Handle sm_margin
    if sm_margin != 0:
        raise NotImplementedError(
            f"sm_margin is not yet supported in the AMD Triton backend backward pass (got sm_margin={sm_margin}, expected 0)"
        )
    # Initialize gradient tensors if not provided
    # NOTE: Using types that are lower precision than float32 such as bfloat16 for fp8 causes mismatches on a small set of tests.
    grad_dtype = torch.float32 if is_fp8([q, k, v]) else q.dtype
    dq = torch.zeros_like(q, dtype=grad_dtype) if dq is None else dq.zero_()
    dk = torch.zeros_like(k, dtype=grad_dtype) if dk is None else dk.zero_()
    dv = torch.zeros_like(v, dtype=grad_dtype) if dv is None else dv.zero_()
    # Determine layout based on cu_seqlens
    layout: Literal["bshd", "bhsd", "thd"]
    if cu_seqlens_q is not None and cu_seqlens_k is not None:
        # Variable length sequence mode
        layout = "thd"
        batch = len(cu_seqlens_q) - 1
        total_q, nheads_q, _ = q.shape
        # Create delta tensor - varlen: (Hq, Total_Q)
        delta = torch.zeros((nheads_q, total_q), device=q.device, dtype=torch.float32)
    else:
        # Regular batch mode
        layout = "bshd"
        batch, seqlen_q, nheads_q, _ = q.shape
        max_seqlen_q = q.shape[1] if max_seqlen_q is None else max_seqlen_q
        max_seqlen_k = k.shape[1] if max_seqlen_k is None else max_seqlen_k
        # Create delta tensor - bshd: (B, Hq, Sq)
        delta = torch.zeros(
            (batch, nheads_q, seqlen_q), device=q.device, dtype=torch.float32
        )
    # V3 backward doesn't have dropout or alibi slopes
    dropout_p = 0.0
    philox_seed, philox_offset = None, None
    alibi_slopes = None
    # NOTE(review): `deterministic` is accepted for interface parity but never
    # forwarded to the kernel below -- confirm determinism guarantees upstream.
    # Call implementation
    if DEBUG:
        print(f"Using Triton implementation in {BWD_MODE} mode")
    attention_backward_triton_impl(
        do=dout,
        q=q,
        k=k,
        v=v,
        o=out,
        softmax_lse=softmax_lse,
        dq=dq,
        dk=dk,
        dv=dv,
        delta=delta,
        sm_scale=softmax_scale,
        alibi_slopes=alibi_slopes,
        causal=causal,
        layout=layout,
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k,
        max_seqlen_q=max_seqlen_q,
        max_seqlen_k=max_seqlen_k,
        seqused_q=seqused_q,
        seqused_k=seqused_k,
        dropout_p=dropout_p,
        philox_seed=philox_seed,
        philox_offset=philox_offset,
        use_exp2=USE_EXP2,
        mode=BWD_MODE,
    )
    if DEBUG:
        print("interface_fa_v3.py::bwd outputs")
        print("dq:", dq.shape)
        print("dk:", dk.shape)
        print("dv:", dv.shape)
        print("delta:", delta.shape)
    # --- Assertions (FA3 always expects exact shapes) ---
    # Gradients should match input shapes
    assert dq.shape == q.shape, f"[bwd_v3] dq shape {dq.shape} != q shape {q.shape}"
    assert dk.shape == k.shape, f"[bwd_v3] dk shape {dk.shape} != k shape {k.shape}"
    assert dv.shape == v.shape, f"[bwd_v3] dv shape {dv.shape} != v shape {v.shape}"
    # delta (softmax_d) should match softmax_lse shape
    assert (
        delta.dtype == torch.float32
    ), f"[bwd_v3] delta dtype {delta.dtype} != torch.float32"
    expected_delta_shape: tuple[int, ...]
    if layout == "thd":
        # varlen: (Hq, Total_Q)
        expected_delta_shape = (q.shape[1], q.shape[0])
    else:
        # bshd: (B, Hq, Sq)
        expected_delta_shape = (q.shape[0], q.shape[2], q.shape[1])
    assert (
        delta.shape == expected_delta_shape
    ), f"[bwd_v3] delta shape {delta.shape} != {expected_delta_shape}"
    # V3 expects (softmax_d, *rest)
    # delta is the softmax_d in this case
    return (delta,)
def fwd_combine(
    out_partial: torch.Tensor,
    lse_partial: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    out_dtype: Optional[torch.dtype] = None,
) -> "torch.Tensor":
    """Merge partial attention outputs from a split (num_splits > 1) pass.

    Args:
        out_partial: Partial output tensor from each split.
        lse_partial: Matching partial log-sum-exp tensor.
        out: Optional preallocated tensor to receive the combined result.
        out_dtype: Optional dtype override for the combined output.

    Returns:
        The combined output tensor.

    Raises:
        NotImplementedError: Always; split-output combination is not
            implemented for the AMD Triton backend.
    """
    raise NotImplementedError("fwd_combine is not yet implemented in the AMD Triton backend")
def get_scheduler_metadata(
    batch_size: int,
    max_seqlen_q: int,
    max_seqlen_k: int,
    num_heads_q: int,
    num_heads_kv: int,
    headdim: int,
    headdim_v: int,
    qkv_dtype: torch.dtype,
    cache_seqlens: torch.Tensor,
    cu_seqlens_q: Optional[torch.Tensor] = None,
    cu_seqlens_k: Optional[torch.Tensor] = None,
    cu_seqlens_k_new: Optional[torch.Tensor] = None,
    seqused_q: Optional[torch.Tensor] = None,
    cache_leftpad: Optional[torch.Tensor] = None,
    page_size: Optional[int] = None,
    max_seqlen_k_new: int = 0,
    causal: bool = False,
    window_size_left: int = -1,
    window_size_right: int = -1,
    attention_chunk: int = 0,
    has_softcap: bool = False,
    num_splits: int = 0,
    pack_gqa: Optional[bool] = None,
    sm_margin: int = 0,
) -> None:
    """Precompute FA3 kernel-scheduling metadata (unsupported here).

    The CUDA FA3 implementation uses this hook to plan kernel dispatch from
    the attention parameters; the AMD Triton backend does no such planning,
    so every call fails.

    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError(
        "get_scheduler_metadata is not supported in the AMD Triton backend yet."
    )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/flash_attn_triton_amd/interface_v3.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 582,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:hopper/test_flash_attn_triton_amd.py | import os
import math
import itertools
import pytest
import torch
import torch.nn.functional as F
from torch._C import parse_schema
from einops import rearrange, repeat
try:
from flash_attn.layers.rotary import apply_rotary_emb
except ImportError:
apply_rotary_emb = None
from padding import pad_input, unpad_input
from test_util import (
attention_ref,
generate_qkv,
generate_random_padding_mask,
)
from flash_attn_interface import flash_attn_func, flash_attn_varlen_func, flash_attn_combine
from flash_attn_interface import flash_attn_with_kvcache, get_scheduler_metadata
# Test-feature toggles, read from the environment once at import time; a
# feature is disabled only when its FLASH_ATTENTION_DISABLE_* variable is
# exactly the string "TRUE".
# NOTE(review): split/local/softcap/packgqa default to disabled ("TRUE"),
# unlike the other flags -- presumably features this backend's tests skip by
# default; confirm against upstream defaults.
DISABLE_BACKWARD = os.getenv("FLASH_ATTENTION_DISABLE_BACKWARD", "FALSE") == "TRUE"
DISABLE_SPLIT = os.getenv("FLASH_ATTENTION_DISABLE_SPLIT", "TRUE") == "TRUE"
DISABLE_PAGEDKV = os.getenv("FLASH_ATTENTION_DISABLE_PAGEDKV", "FALSE") == "TRUE"
DISABLE_APPENDKV = os.getenv("FLASH_ATTENTION_DISABLE_APPENDKV", "FALSE") == "TRUE"
DISABLE_LOCAL = os.getenv("FLASH_ATTENTION_DISABLE_LOCAL", "TRUE") == "TRUE"
DISABLE_SOFTCAP = os.getenv("FLASH_ATTENTION_DISABLE_SOFTCAP", "TRUE") == "TRUE"
DISABLE_PACKGQA = os.getenv("FLASH_ATTENTION_DISABLE_PACKGQA", "TRUE") == "TRUE"
DISABLE_FP16 = os.getenv("FLASH_ATTENTION_DISABLE_FP16", "FALSE") == "TRUE"
# fp8 additionally requires compute capability >= 9 (Hopper); this queries the
# GPU at import time, so importing this module requires a visible CUDA device.
DISABLE_FP8 = os.getenv("FLASH_ATTENTION_DISABLE_FP8", "FALSE") == "TRUE" or torch.cuda.get_device_capability("cuda")[0] < 9
DISABLE_HDIM64 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM64", "FALSE") == "TRUE"
DISABLE_HDIM96 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM96", "FALSE") == "TRUE"
DISABLE_HDIM128 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM128", "FALSE") == "TRUE"
DISABLE_HDIM192 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM192", "FALSE") == "TRUE"
DISABLE_HDIM256 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM256", "FALSE") == "TRUE"
# Head dimensions to parametrize over: every hdim whose flag is unset.
COMPILED_HDIMS = (
    []
    + ([64] if not DISABLE_HDIM64 else [])
    + ([96] if not DISABLE_HDIM96 else [])
    + ([128] if not DISABLE_HDIM128 else [])
    + ([192] if not DISABLE_HDIM192 else [])
    + ([256] if not DISABLE_HDIM256 else [])
)
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16] + ([torch.float16] if not DISABLE_FP16 else []) + ([torch.float8_e4m3fn] if not DISABLE_FP8 else []))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
# @pytest.mark.parametrize("has_qv", [False, True])
@pytest.mark.parametrize("has_qv", [False])
# @pytest.mark.parametrize("deterministic", [False, True])
@pytest.mark.parametrize("deterministic", [False])
@pytest.mark.parametrize("softcap", [0.0] + ([15.0] if not DISABLE_SOFTCAP else []))
# @pytest.mark.parametrize("softcap", [0.0])
@pytest.mark.parametrize("local", [False] + ([True] if not DISABLE_LOCAL else []))
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
# @pytest.mark.parametrize("V_colmajor", [False, True])
@pytest.mark.parametrize("V_colmajor", [False])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64, 128, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [64, 96, 128, 192])
@pytest.mark.parametrize("d", COMPILED_HDIMS)
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 1),
        (64, 128),
        (128, 192),
        (256, 256),
        (239, 1),
        (799, 3),
        (113, 203),
        (113, 128),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (384, 256),
        (640, 128),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (4096, 4096),
        (4224, 4224),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(128, 128)])
def test_flash_attn_output(
    seqlen_q, seqlen_k, d, causal, local, softcap, V_colmajor, deterministic, has_qv, mha_type, dtype
):
    """Check flash_attn_func against a PyTorch reference attention.

    For each (dv, attention_chunk) combination, asserts that the forward
    output error is within rtol x the error of a low-precision PyTorch
    reference (plus a small atol), and -- when backward is enabled and the
    configuration supports it -- that the autograd dQ/dK/dV errors satisfy
    the same relative bound.
    """
    if V_colmajor and (seqlen_k % 16 != 0 or dtype != torch.float8_e4m3fn):
        pytest.skip("V_colmajor requires seqlen_k to be a multiple of 16 and dtype to be float8_e4m3fn")
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    # batch_size = 40
    # nheads = 16
    batch_size = 9 if seqlen_k <= 2048 else 2
    # batch_size = 1
    nheads = 6
    # nheads = 1
    nheads_kv = nheads if mha_type == "mha" else (2 if mha_type == "gqa" else 1)
    dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
    dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
    if dtype == torch.float8_e4m3fn:
        dv_vals = [d]
    attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if not DISABLE_LOCAL else [0]
    for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
        # Reference tensors are built in dtype_ref, then round-tripped through
        # the test dtype so both implementations see identical inputs.
        q_ref = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref)
        if softcap > 0.0:
            # Ensure the values of qk are at least within softcap range.
            q_ref = (q_ref * softcap / 4)
        q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
        k_ref = torch.randn(batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
        v_ref = torch.randn(batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
        if has_qv:
            qv_ref = torch.randn(batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
        else:
            qv_ref = None
        # Put window_size after QKV randn so that window_size changes from test to test
        window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,)).tolist()
        # window_size = (-1, -1) if not local else (16, 0)
        if dtype == torch.float8_e4m3fn:
            q_descale, k_descale, v_descale = [torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32) * 2 for _ in range(3)]
        else:
            q_descale, k_descale, v_descale = None, None, None
        q, k, v = [x.detach().to(dtype).requires_grad_() for x in (q_ref, k_ref, v_ref)]
        qv = qv_ref.detach().to(dtype).requires_grad_() if has_qv else None
        if V_colmajor:
            v = rearrange(rearrange(v.detach(), "b s h d -> b h d s").contiguous(), "b h d s -> b s h d").requires_grad_()
        out_ref, attn_ref = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            None,
            None,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            softcap=softcap
        )
        # Second reference in low precision (upcast=False) to estimate the
        # numerical error floor of a plain PyTorch implementation.
        out_pt, attn_pt = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            None,
            None,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
            intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
        )
        # qk = torch.einsum('bshd,bthd->bhst', q_ref, k_ref).float()
        # if qv is not None:
        #     qk += torch.einsum('bshd,bthd->bhst', qv_ref, v_ref).float()
        # m = qk.amax(-1, keepdim=True)
        # s_tmp = torch.exp((qk - m) / math.sqrt(d))
        # exp_sum = s_tmp.sum(-1)
        # qk = torch.einsum('bthd,bshd->bhts', q_ref.float() / math.sqrt(d), k_ref.float())
        # lse_ref = torch.logsumexp(qk, dim=-1)
        # Numerical error if we just do any arithmetic on out_ref
        fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
        rtol = 2 if softcap == 0.0 else 3
        print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
        print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
        pack_gqa_vals = [False, True] if not DISABLE_PACKGQA else [False]
        num_splits_vals = [1, 3] if not DISABLE_SPLIT else [1]
        for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
            out = flash_attn_func(
                q,
                k,
                v,
                causal=causal,
                qv=qv,
                q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
                window_size=window_size,
                attention_chunk=attention_chunk,
                softcap=softcap,
                pack_gqa=pack_gqa,
                num_splits=num_splits
            )
            print(f"Output max diff: {(out - out_ref).abs().max().item()}")
            print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
            # if not causal:
            #     print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
            # breakpoint()
            # Check that FlashAttention's numerical error is at most twice the numerical error
            # of a Pytorch implementation.
            assert (out - out_ref).abs().max().item() <= rtol * (out_pt - out_ref).abs().max().item() + fwd_atol
        # Backward check: compare autograd grads against the two references.
        if (
            not DISABLE_BACKWARD
            and dtype != torch.float8_e4m3fn
            and not V_colmajor
            and not has_qv
            and not dv > 256
            and not attention_chunk != 0
        ):
            g = torch.randn_like(out)
            do_o = ((g.float() * out.float()).sum(-1)).transpose(1, 2)
            # import flash_attn_3_cuda
            # dq, dk, dv, softmax_d, dq_accum, dk_accum, dv_accum = flash_attn_3_cuda.bwd(
            #     g,
            #     q,
            #     k,
            #     v,
            #     out,
            #     lse,
            #     None,
            #     None,
            #     None,
            #     d ** (-0.5),
            #     causal,
            #     window_size[0], window_size[1],
            #     softcap,
            #     deterministic,
            #     0, # sm_margin
            # )
            dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
            # print(f"dO_O max diff: {(softmax_d - do_o).abs().max().item()}")
            # assert (softmax_d - do_o).abs().max().item() <= 1e-5
            # assert dq_accum.abs().max().item() == 0.0
            # dS = torch.einsum('bthd,bshd->bhts', g.float(), v.float())
            # P = torch.softmax(qk, -1)
            # dP = P * (dS - do_o.transpose(1, 2).unsqueeze(1))
            # dQ = torch.einsum('bhts,bshd->bthd', dP, k.float())
            # dV = torch.einsum('bhts,bthd->bshd', P, g.float())
            # dK = torch.einsum('bhts,bthd->bshd', dP, q.float())
            # dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
            dq_ref, dk_ref, dv_ref = torch.autograd.grad(out_ref, (q_ref, k_ref, v_ref), g)
            dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
            print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
            print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
            print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
            print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
            print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
            print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
            print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
            print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
            print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
            print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
            print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
            print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
            # breakpoint()
            dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
            assert (dq - dq_ref).abs().max().item() <= rtol * (dq_pt - dq_ref).abs().max().item() + dq_atol
            dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
            assert (dk - dk_ref).abs().max().item() <= rtol * (dk_pt - dk_ref).abs().max().item() + dk_atol
            dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
            assert (dv - dv_ref).abs().max().item() <= rtol * (dv_pt - dv_ref).abs().max().item() + dv_atol
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16] + ([torch.float16] if not DISABLE_FP16 else []) + ([torch.float8_e4m3fn] if not DISABLE_FP8 else []))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
# @pytest.mark.parametrize("has_qv", [False, True])
@pytest.mark.parametrize("has_qv", [False])
# @pytest.mark.parametrize("deterministic", [False, True])
@pytest.mark.parametrize("deterministic", [False])
@pytest.mark.parametrize("softcap", [0.0] + ([15.0] if not DISABLE_SOFTCAP else []))
# @pytest.mark.parametrize("softcap", [0.0])
@pytest.mark.parametrize("local", [False] + ([True] if not DISABLE_LOCAL else []))
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
@pytest.mark.parametrize("add_unused_qkv", [False, True])
# @pytest.mark.parametrize("add_unused_qkv", [True])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [64, 96, 128])
@pytest.mark.parametrize("d", COMPILED_HDIMS)
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(1, 1),
(1, 3),
(2, 1),
(511, 1),
(3, 513),
(64, 128),
(128, 128),
(256, 256),
(113, 203),
(128, 217),
(113, 211),
(108, 256),
(256, 512),
(307, 256),
(640, 128),
(512, 256),
(1024, 1024),
(1023, 1024),
(1024, 1023),
(2048, 2048),
],
)
def test_flash_attn_varlen_output(
seqlen_q, seqlen_k, d, add_unused_qkv, causal, local, softcap, deterministic, has_qv, mha_type, dtype
):
device = "cuda"
# set seed
torch.random.manual_seed(seqlen_q + seqlen_k + d + int(causal) * 2 + int(local))
# batch_size = 40
# nheads = 16
batch_size = 9 if seqlen_q <= 2048 else 2
nheads = 6
# batch_size = 2
# nheads = 1
nheads_kv = nheads if mha_type == "mha" else (2 if mha_type == "gqa" else 1)
dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
if dtype == torch.float8_e4m3fn:
dv_vals = [d]
attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if seqlen_q <= seqlen_k and not DISABLE_LOCAL else [0]
for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
q_ref = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref)
if softcap > 0.0:
# Ensure the values of qk are at least within softcap range.
q_ref = (q_ref * softcap / 4).detach().requires_grad_()
q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
k_ref = torch.randn(batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
v_ref = torch.randn(batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
if has_qv:
qv_ref = torch.randn(batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
else:
qv_ref = None
# Put window_size after QKV randn so that window_size changes from test to test
window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
if dtype == torch.float8_e4m3fn:
q_descale, k_descale, v_descale = [torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32) * 2 for _ in range(3)]
else:
q_descale, k_descale, v_descale = None, None, None
q, k, v = [x.detach().requires_grad_() for x in (q_ref, k_ref, v_ref)]
qv = qv_ref.detach() if has_qv else None
query_padding_mask = generate_random_padding_mask(
seqlen_q, batch_size, device, mode="random", zero_lengths=False
)
key_padding_mask = generate_random_padding_mask(
seqlen_k, batch_size, device, mode="random", zero_lengths=True
)
def _gen_unused_masks(padding_mask, add_unused, max_seq_len, bs, device):
if add_unused:
another_mask = generate_random_padding_mask(max_seq_len, bs, device)
attn_mask = torch.logical_and(padding_mask, another_mask)
unused_mask = torch.logical_xor(
torch.logical_or(padding_mask, another_mask), attn_mask
)
else:
attn_mask = padding_mask
unused_mask = None
return attn_mask, unused_mask
query_padding_mask, query_unused_mask = _gen_unused_masks(
query_padding_mask, add_unused_qkv, seqlen_q, batch_size, q.device
)
key_padding_mask, key_unused_mask = _gen_unused_masks(
key_padding_mask, add_unused_qkv, seqlen_k, batch_size, k.device
)
(
q_unpad,
k_unpad,
v_unpad,
qv_unpad,
cu_seqlens_q,
cu_seqlens_k,
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
q,
k,
v,
qv,
output_pad_fn,
dq_pad_fn,
dk_pad_fn,
) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, qv=qv, kvpacked=False,
query_unused_mask=query_unused_mask, key_unused_mask=key_unused_mask)
q_unpad, k_unpad, v_unpad = [x.detach().to(dtype).requires_grad_() for x in (q_unpad, k_unpad, v_unpad)]
out_ref, attn_ref = attention_ref(
q_ref,
k_ref,
v_ref,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv_ref,
q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap
)
out_pt, attn_pt = attention_ref(
q_ref,
k_ref,
v_ref,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv_ref,
q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap,
upcast=False,
reorder_ops=True,
intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
)
print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
if query_unused_mask is not None:
q_zero_masking = rearrange(query_unused_mask, "b s -> b s 1 1")
# Numerical error if we just do any arithmetic on out_ref
fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
rtol = 2 if softcap == 0.0 else 3
pack_gqa_vals = [False, True] if not DISABLE_PACKGQA else [False]
num_splits_vals = [1, 3] if not DISABLE_SPLIT else [1]
for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
out_unpad = flash_attn_varlen_func(
q_unpad,
k_unpad,
v_unpad,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
seqused_q=seqused_q,
seqused_k=seqused_k,
causal=causal,
qv=qv_unpad,
q_descale=q_descale,
k_descale=k_descale, v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
softcap=softcap,
)
out = output_pad_fn(out_unpad)
if query_unused_mask is not None:
out.masked_fill_(q_zero_masking, 0.0)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
# if not causal:
# print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
# breakpoint()
# Check that FlashAttention's numerical error is at most 3x the numerical error
# of a Pytorch implementation.
assert (out - out_ref).abs().max().item() <= rtol * (out_pt - out_ref).abs().max().item() + fwd_atol
if (
not DISABLE_BACKWARD
and dtype != torch.float8_e4m3fn
and not has_qv
and not dv > 256
and not attention_chunk != 0
):
g_unpad = torch.randn_like(out_unpad)
do_o = ((g_unpad.float() * out_unpad.float()).sum(-1)).transpose(-1, -2)
# import flash_attn_3_cuda
# dq_unpad, dk_unpad, dv_unpad, softmax_d, dq_accum, lse_log2 = flash_attn_3_cuda.bwd_varlen(
# g_unpad,
# q_unpad,
# k_unpad,
# v_unpad,
# out_unpad,
# lse,
# None,
# None,
# None,
# cu_seqlens_q,
# cu_seqlens_k,
# None, None,
# max_seqlen_q,
# max_seqlen_k,
# d ** (-0.5),
# causal,
# window_size[0], window_size[1],
# softcap,
# deterministic,
# 0, # sm_margin
# )
dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(out_unpad, (q_unpad, k_unpad, v_unpad), g_unpad)
dq = dq_pad_fn(dq_unpad)
dk = dk_pad_fn(dk_unpad)
dv = dk_pad_fn(dv_unpad)
if key_unused_mask is not None:
k_zero_masking = rearrange(key_unused_mask, "b s -> b s 1 1")
dk.masked_fill_(k_zero_masking, 0.0)
dv.masked_fill_(k_zero_masking, 0.0)
if query_unused_mask is not None:
dq.masked_fill_(q_zero_masking, 0.0)
# print(f"dO_O max diff: {(softmax_d - do_o).abs().max().item()}")
# assert (softmax_d - do_o).abs().max().item() <= 1e-5
# assert dq_accum.abs().max().item() == 0.0
g = output_pad_fn(g_unpad)
# qk = torch.einsum('bthd,bshd->bhts', q / (d ** 0.5), k).float()
# qk = torch.masked_fill(qk, rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
# dS = torch.einsum('bthd,bshd->bhts', g.float(), v.float())
# P = torch.softmax(qk, -1)
# dP = P * (dS - (g.float() * out.float()).sum(-1).transpose(1, 2).unsqueeze(-1))
# dQ = torch.einsum('bhts,bshd->bthd', dP, k.float())
# dV = torch.einsum('bhts,bthd->bshd', P, g.float())
# dK = torch.einsum('bhts,bthd->bshd', dP, q.float())
# dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
dq_ref, dk_ref, dv_ref = torch.autograd.grad(out_ref, (q_ref, k_ref, v_ref), g)
dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
# breakpoint()
dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
assert (dq - dq_ref).abs().max().item() <= rtol * (dq_pt - dq_ref).abs().max().item() + dq_atol
dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
assert (dk - dk_ref).abs().max().item() <= rtol * (dk_pt - dk_ref).abs().max().item() + dk_atol
dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
assert (dv - dv_ref).abs().max().item() <= rtol * (dv_pt - dv_ref).abs().max().item() + dv_atol
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16] + ([torch.float8_e4m3fn] if not DISABLE_FP8 else []))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
@pytest.mark.parametrize("new_kv", [False] + ([True] if not DISABLE_APPENDKV else []))
# @pytest.mark.parametrize("new_kv", [True])
@pytest.mark.parametrize("causal,local", [(False, False), (True, False)] + ([(False, True)] if not DISABLE_LOCAL else []))
# @pytest.mark.parametrize("causal,local", [(False, False), (True, False)])
# @pytest.mark.parametrize("causal,local", [(False, False)])
@pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True, False] if not DISABLE_APPENDKV else [True])
# @pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True])
@pytest.mark.parametrize("has_rotary_seqlens", [False, True])
# @pytest.mark.parametrize("has_rotary_seqlens", [False])
@pytest.mark.parametrize("rotary_interleaved", [False, True] if not DISABLE_APPENDKV else [False])
# @pytest.mark.parametrize("rotary_interleaved", [True])
@pytest.mark.parametrize("rotary_fraction", [0.0, 0.5, 1.0] if (not DISABLE_APPENDKV) and (apply_rotary_emb is not None) else [0.0])
# @pytest.mark.parametrize("rotary_fraction", [0.0])
@pytest.mark.parametrize("page_size", [None] + ([1, 4, 128] if not DISABLE_PAGEDKV else []))
# @pytest.mark.parametrize("page_size", [None])
@pytest.mark.parametrize("has_leftpad", [False])
# @pytest.mark.parametrize("has_leftpad", [False])
@pytest.mark.parametrize("has_batch_idx", [False])
# @pytest.mark.parametrize("has_batch_idx", [False])
@pytest.mark.parametrize("varlen_q", [False])
# @pytest.mark.parametrize("varlen_q", [False])
# @pytest.mark.parametrize("d", [32, 59, 64, 80, 128, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
@pytest.mark.parametrize("d", [128])
# @pytest.mark.parametrize("d", [192])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(1, 128),
(1, 339),
(3, 1024),
(64, 800),
(64, 256),
(3, 799),
(64, 2048),
(16, 20000),
# (1, 128 * 1024),
# (16, 128 * 1024),
(128, 128),
(256, 512), # To test appending KV with more than 1 block
(2048, 3577), # Enough tile to test persistent scheduler
],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_kvcache(
seqlen_q,
seqlen_k,
d,
varlen_q,
has_batch_idx,
has_leftpad,
page_size,
rotary_fraction,
rotary_interleaved,
has_rotary_seqlens,
seqlen_new_eq_seqlen_q,
causal,
local,
new_kv,
mha_type,
dtype,
):
if page_size is not None and seqlen_k % page_size != 0:
pytest.skip()
if seqlen_q > seqlen_k and new_kv:
pytest.skip()
if not new_kv and rotary_fraction > 0.0:
pytest.skip()
if rotary_fraction == 0.0 and has_rotary_seqlens:
pytest.skip()
device = "cuda"
# set seed
torch.random.manual_seed(0)
batch_size = 5
# batch_size = 1
batch_size_cache = batch_size if not has_batch_idx else batch_size * 2
nheads = 6
# nheads = 1
# rotary_dim must be a multiple of 16, and must be <= d
rotary_dim = math.floor(int(rotary_fraction * d) / 16) * 16
nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 3)
assert nheads % nheads_k == 0
dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
if dtype == torch.float8_e4m3fn:
dv_vals = [d]
attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if (causal or local) and not DISABLE_LOCAL else [0]
for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
has_qv = d == 64 and dv >= 256
q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
if has_qv:
qv = torch.randn(batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
else:
qv = None
if varlen_q:
query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode="random")
q_unpad, indices_q, cu_seqlens_q, max_seqlen_q, *rest = unpad_input(q, query_padding_mask)
output_pad_fn = lambda output_unpad: pad_input(
output_unpad, indices_q, batch_size, seqlen_q
)
qv_unpad = rearrange(qv, "b s ... -> (b s) ...")[indices_q] if has_qv else None
else:
query_padding_mask = None
q_unpad = q
qv_unpad = qv
cu_seqlens_q, max_seqlen_q = None, None
# Put window_size after QKV randn so that window_size changes from test to test
window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
seqlen_new = seqlen_q if seqlen_new_eq_seqlen_q else torch.randint(1, seqlen_q + 1, (1,)).item()
cu_seqlens_k_new = None
key_new_padding_mask = None
if new_kv:
k = torch.randn(batch_size, seqlen_new, nheads_k, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
v = torch.randn(batch_size, seqlen_new, nheads_k, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
if varlen_q: # k & v are also varlen
key_new_padding_mask = generate_random_padding_mask(seqlen_new, batch_size, device, mode="random")
k_unpad, indices_k, cu_seqlens_k_new, *rest = unpad_input(k, key_new_padding_mask)
v_unpad, *rest = unpad_input(v, key_new_padding_mask)
else:
k_unpad, v_unpad = k, v
else:
k, v, k_unpad, v_unpad = None, None, None, None
if page_size is None:
k_cache = torch.randn(batch_size_cache, seqlen_k, nheads_k, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
v_cache = torch.randn(batch_size_cache, seqlen_k, nheads_k, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
page_table = None
else:
(
k_cache,
v_cache,
page_table,
k_cache_paged,
v_cache_paged,
num_blocks,
) = _generate_block_kvcache(
seqlen_k, page_size, batch_size_cache, nheads_k, d, dv, device, dtype, dtype_ref
)
cache_seqlens = torch.randint(
0 if new_kv else 1,
# If we don't use seqlen_q in the case of causal and rotary, cos/sin won't be long enough
(
(seqlen_k - (seqlen_q if (causal or local) and rotary_dim > 1 else seqlen_new) + 1)
if new_kv
else (seqlen_k + 1)
),
(batch_size,),
dtype=torch.int32,
device=device,
)
if has_leftpad:
cache_leftpad = torch.cat([torch.randint(0, cache_seqlens[i].item(), (1,), dtype=torch.int32, device=device)
if cache_seqlens[i].item() > 0 else torch.zeros(1, dtype=torch.int32, device=device)
for i in range(batch_size)])
else:
cache_leftpad = None
if has_batch_idx:
cache_batch_idx = torch.randperm(batch_size_cache, dtype=torch.int32, device=device)[
:batch_size
]
else:
cache_batch_idx = None
arange = rearrange(torch.arange(seqlen_k, device=device), "s -> 1 s")
cache_seqlens_expanded = rearrange(cache_seqlens, "b -> b 1")
if not new_kv:
key_padding_mask = arange < cache_seqlens_expanded
else:
k_new_seqlens = key_new_padding_mask.sum(-1, keepdims=True) if varlen_q else seqlen_new
key_padding_mask = arange < cache_seqlens_expanded + k_new_seqlens
if has_leftpad:
key_padding_mask = torch.logical_and(
key_padding_mask, arange >= cache_leftpad.unsqueeze(-1).expand(-1, seqlen_k)
)
# cache_seqlens = torch.tensor([64], dtype=torch.int32, device=device)
rotary_seqlens = cache_seqlens if not has_rotary_seqlens else cache_seqlens // 2
if rotary_dim > 0:
angle = (
torch.rand(
seqlen_k if page_size is None else num_blocks * page_size,
rotary_dim // 2,
device=device,
)
* 2
* math.pi
)
cos = torch.cos(angle).to(dtype=dtype_ref).to(dtype).to(dtype_ref)
sin = torch.sin(angle).to(dtype=dtype_ref).to(dtype).to(dtype_ref)
if causal or local:
q_ro = apply_rotary_emb(
q, cos, sin, seqlen_offsets=rotary_seqlens, interleaved=rotary_interleaved
)
else:
q_ro = rearrange(
apply_rotary_emb(
rearrange(q, "b s h d -> b 1 (s h) d"),
cos,
sin,
seqlen_offsets=rotary_seqlens,
interleaved=rotary_interleaved,
),
"b 1 (s h) d -> b s h d",
s=seqlen_q,
)
# q_ro = q
k_ro = apply_rotary_emb(
k, cos, sin, seqlen_offsets=rotary_seqlens, interleaved=rotary_interleaved
)
else:
cos, sin = None, None
q_ro, k_ro = q, k
# k_cache[:, 64:] = -1
k_cache_ref = (k_cache if not has_batch_idx else k_cache[cache_batch_idx]).clone()
v_cache_ref = (v_cache if not has_batch_idx else v_cache[cache_batch_idx]).clone()
if new_kv:
update_mask = torch.logical_and(
cache_seqlens_expanded <= arange, arange < cache_seqlens_expanded + k_new_seqlens
)
k_to_update = rearrange(k_ro, "b s ... -> (b s) ...")
v_to_update = rearrange(v, "b s ... -> (b s) ...")
if varlen_q:
k_to_update = k_to_update[indices_k]
v_to_update = v_to_update[indices_k]
k_cache_ref[update_mask] = k_to_update
v_cache_ref[update_mask] = v_to_update
k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k)
v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k)
out_ref, _ = attention_ref(
q_ro,
k_cache_rep,
v_cache_rep,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv,
window_size=window_size,
attention_chunk=attention_chunk,
key_leftpad=cache_leftpad,
)
out_pt, _ = attention_ref(
q_ro,
k_cache_rep,
v_cache_rep,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv,
window_size=window_size,
attention_chunk=attention_chunk,
upcast=False,
reorder_ops=True,
key_leftpad=cache_leftpad,
intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None
)
q = q.to(dtype)
q_unpad = q_unpad.to(dtype) if varlen_q else None
k_cache = k_cache.to(dtype)
v_cache = v_cache.to(dtype)
k_cache_paged = k_cache_paged.to(dtype) if page_size is not None else None
v_cache_paged = v_cache_paged.to(dtype) if page_size is not None else None
k = k.to(dtype) if k is not None else None
v = v.to(dtype) if v is not None else None
k_unpad = k_unpad.to(dtype) if k_unpad is not None else None
v_unpad = v_unpad.to(dtype) if v_unpad is not None else None
qv = qv.to(dtype) if qv is not None else None
qv_unpad = qv_unpad.to(dtype) if (varlen_q and qv is not None) else None
cos = cos.to(dtype) if cos is not None else None
sin = sin.to(dtype) if sin is not None else None
k_cache_saved = k_cache.clone() if page_size is None else k_cache_paged.clone()
v_cache_saved = v_cache.clone() if page_size is None else v_cache_paged.clone()
num_splits_vals = [1, 0] if not DISABLE_SPLIT else [1]
precompute_metadata_vals = [False]
for num_splits, precompute_metadata in itertools.product(num_splits_vals, precompute_metadata_vals):
if precompute_metadata:
scheduler_metadata = get_scheduler_metadata(
batch_size, max_seqlen_q if varlen_q else seqlen_q, seqlen_k, nheads, nheads_k, d,
cache_seqlens, q.dtype, headdim_v=dv, cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k_new=cu_seqlens_k_new, cache_leftpad=cache_leftpad,
max_seqlen_k_new=seqlen_new, page_size=page_size,
causal=causal, window_size=window_size, attention_chunk=attention_chunk,
num_splits=num_splits
)
else:
scheduler_metadata = None
# Repeat to test metadata reuse
for _ in range(1 if not precompute_metadata else 2):
if page_size is None:
k_cache.copy_(k_cache_saved)
v_cache.copy_(v_cache_saved)
else:
k_cache_paged.copy_(k_cache_saved)
v_cache_paged.copy_(v_cache_saved)
out, lse, *rest = flash_attn_with_kvcache(
q if not varlen_q else q_unpad,
k_cache if page_size is None else k_cache_paged,
v_cache if page_size is None else v_cache_paged,
k if not new_kv or not varlen_q else k_unpad,
v if not new_kv or not varlen_q else v_unpad,
qv=qv if not varlen_q else qv_unpad,
rotary_cos=cos,
rotary_sin=sin,
cache_seqlens=cache_seqlens,
cache_batch_idx=cache_batch_idx,
cache_leftpad=cache_leftpad,
page_table=page_table,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k_new=cu_seqlens_k_new,
max_seqlen_q=max_seqlen_q,
rotary_seqlens=rotary_seqlens,
causal=causal,
window_size=window_size,
attention_chunk=attention_chunk,
rotary_interleaved=rotary_interleaved,
scheduler_metadata=scheduler_metadata,
num_splits=num_splits,
return_softmax_lse=True
)
if varlen_q:
out = output_pad_fn(out)
# out = flash_attn_with_kvcache(
# q, k_cache, v_cache, cache_seqlens=cache_seqlens, causal=causal, window_size=window_size
# )
# out = flash_attn_with_kvcache(q, k_cache, v_cache, causal=causal, window_size=window_size)
# qk = torch.einsum("bqhd,bkhd->bhqk", q, k_cache_ref)
# m = qk.amax(-1, keepdim=True)
# s_tmp = torch.exp((qk - m) / math.sqrt(d))
# o1 = torch.einsum('bhst,bthd->bshd', s_tmp, v_cache_ref)
# lse_ref = torch.logsumexp(qk / math.sqrt(d), -1)
# probs = torch.softmax(qk, dim=-1)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
# breakpoint()
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
if new_kv:
if page_size is None:
k_cache_select = (
k_cache.to(dtype_ref) if not has_batch_idx else k_cache.to(dtype_ref)[cache_batch_idx]
)
v_cache_select = (
v_cache.to(dtype_ref) if not has_batch_idx else v_cache.to(dtype_ref)[cache_batch_idx]
)
else:
k_cache_select = rearrange(
k_cache_paged.to(dtype_ref)[(page_table if not has_batch_idx else page_table[cache_batch_idx]).flatten()],
"(b nblocks) block_size ... -> b (nblocks block_size) ...",
b=batch_size,
)[:, :seqlen_k].to(dtype_ref)
v_cache_select = rearrange(
v_cache_paged.to(dtype_ref)[(page_table if not has_batch_idx else page_table[cache_batch_idx]).flatten()],
"(b nblocks) block_size ... -> b (nblocks block_size) ...",
b=batch_size,
)[:, :seqlen_k].to(dtype_ref)
k_cache_ref = k_cache_ref.to(dtype).to(dtype_ref)
v_cache_ref = v_cache_ref.to(dtype).to(dtype_ref)
if dtype is not torch.float8_e4m3fn:
assert torch.equal(v_cache_select, v_cache_ref)
else:
assert torch.allclose(v_cache_select, v_cache_ref, rtol=1e-3, atol=1e-3)
# breakpoint()
# if rotary_dim == 0 and dtype is not torch.float8_e4m3fn:
if rotary_dim == 0:
assert torch.equal(k_cache_select, k_cache_ref)
else:
# if not torch.allclose(k_cache_select, k_cache_ref, rtol=1e-3, atol=1e-3):
# breakpoint()
if dtype is not torch.float8_e4m3fn:
assert torch.allclose(k_cache_select, k_cache_ref, rtol=1e-3, atol=1e-3)
else:
assert torch.allclose(k_cache_select, k_cache_ref, rtol=1e-1, atol=1e-1)
mult = 4 if dtype == torch.float8_e4m3fn else 2
assert (out - out_ref).abs().max().item() <= mult * (out_pt - out_ref).abs().max().item() + 1e-5
mult_mean = 3 if dtype == torch.float8_e4m3fn else 1.5
assert (out - out_ref).abs().mean().item() <= mult_mean * (out_pt - out_ref).abs().mean().item()
def _generate_block_kvcache(seqlen_k, page_size, batch_size, nheads_k, d, dv, device, dtype, dtype_ref):
    """Build a random paged (block) KV cache together with its contiguous view.

    Returns ``(k_cache, v_cache, page_table, k_cache_paged, v_cache_paged,
    num_blocks)`` where the paged tensors are ``(num_blocks, page_size,
    nheads_k, d_or_dv)``, ``page_table`` is ``(batch_size, nblocks)`` of
    int32 page indices, and ``k_cache``/``v_cache`` are the gathered
    ``(batch_size, seqlen_k, nheads_k, d_or_dv)`` views of the same data.
    """
    # Allocate 3x more pages than strictly needed so pages are scattered.
    num_blocks = math.ceil(seqlen_k / page_size) * batch_size * 3

    def _random_pages(head_dim):
        # Round-trip through `dtype` so values are representable in the test dtype.
        pages = torch.randn(num_blocks, page_size, nheads_k, head_dim, device=device, dtype=dtype_ref)
        return pages.to(dtype).to(dtype_ref)

    k_cache_paged = _random_pages(d)
    v_cache_paged = _random_pages(dv)
    # Each batch element owns a random shuffled slice of the page pool.
    page_table = torch.randperm(num_blocks, dtype=torch.int32, device=device).reshape(batch_size, -1)

    def _contiguous_view(paged):
        # Gather pages per the page table and flatten them into (b, seqlen, h, dim).
        gathered = paged[page_table.flatten()]
        flat = gathered.reshape(batch_size, -1, *gathered.shape[2:])
        return flat[:, :seqlen_k]

    k_cache = _contiguous_view(k_cache_paged)
    v_cache = _contiguous_view(v_cache_paged)
    return k_cache, v_cache, page_table, k_cache_paged, v_cache_paged, num_blocks
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize('d', [128])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(64, 8192),
],
)
def test_flash_attn_cluster(seqlen_q, seqlen_k, d, causal, dtype):
device = "cuda"
torch.random.manual_seed(0)
batch_size = 2
nheads = 16
nheads_kv = 4
# There was a bug where this would cause "unspecified launch failure" due to Cluster
q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype)
k = torch.randn(batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype)
v = torch.randn(batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype)
for _ in range(100):
flash_attn_func(q, k, v, causal=causal)
# @pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128])
# @pytest.mark.parametrize('d', [32, 56, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [80])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(1, 239),
(239, 1),
(3, 799),
(799, 3),
(1024, 128),
(97, 97),
(128, 128),
(200, 200),
(256, 256),
(257, 257),
(384, 384),
(512, 512),
(768, 768),
(1024, 1024),
(2048, 2048),
],
)
@pytest.mark.skip(reason="Cannot be run in parallel with other tests due to memory usage")
def test_flash_attn_race_condition(seqlen_q, seqlen_k, d, causal, dtype):
device = "cuda"
# set seed
torch.random.manual_seed(0)
# Simulate under memory load
dummy = torch.empty(70 * 1024 ** 3, dtype=torch.uint8, device=device)
batch_size = 60 # Sometimes we need large batch size for the race conditions to trigger
nheads = 4
q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
torch.random.manual_seed(42)
out0 = flash_attn_func(q, k, v, causal=causal)
g = torch.randn_like(out0)
dq0, dk0, dv0 = torch.autograd.grad(out0, (q, k, v), g)
# Numerical error if we just do any arithmetic on dq
dq_atol = 2 * ((dq0 + 0.3 - 0.3) - dq0).abs().max().item()
for i in range(1000):
torch.random.manual_seed(42)
out = flash_attn_func(q, k, v, causal=causal)
assert torch.equal(out, out0)
# assert torch.equal(lse, lse0)
dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
dq_equal = torch.allclose(dq, dq0, atol=dq_atol)
if not dq_equal:
print(f"Iter {i}, {dq_atol = }, dQ max diff: {(dq - dq0).abs().max().item()}")
# breakpoint()
assert torch.equal(dv, dv0)
assert torch.equal(dk, dk0)
assert dq_equal
def attention_combine_ref(out_partial, lse_partial):
    """Reference combine of per-split partial attention results along dim 0.

    out_partial: (num_splits, batch_size, seqlen, nheads, d)
    lse_partial: per-split log-sum-exp values; after ``unsqueeze(-1)`` it must
        broadcast against out_partial's first four dims (the caller in this
        file passes (num_splits, batch_size, seqlen, nheads)).

    Returns ``(out, lse)``: the softmax-weighted sum over splits and the
    combined log-sum-exp.
    """
    # Combined normalizer across splits.
    lse = torch.logsumexp(lse_partial, dim=0)
    # Per-split weights.  A -inf split (or an all -inf column, which produces
    # inf/nan here) contributes nothing, so zero those weights out.
    weights = torch.exp(lse_partial - lse)
    invalid = torch.isinf(weights) | torch.isnan(weights)
    weights = torch.where(invalid, torch.zeros_like(weights), weights)
    combined = (weights.unsqueeze(-1) * out_partial).sum(dim=0)
    return combined, lse
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float32])
# @pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
@pytest.mark.parametrize("d", [64, 96, 128, 192, 256, 512])
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize("seqlen", [1, 2, 3, 32, 64, 256, 113, 108, 640, 1024])
# @pytest.mark.parametrize("seqlen", [12, 32, 64, 256, 112, 108, 640, 1024, 2048, 8192])
# @pytest.mark.parametrize("seqlen", [15])
@pytest.mark.parametrize("num_splits", [1, 2, 3, 5, 17, 32, 55, 97, 133])
# @pytest.mark.parametrize("num_splits", [1, 2, 3, 5, 11])
# @pytest.mark.parametrize("num_splits", [128])
def test_flash_attn_combine(num_splits, seqlen, d, dtype):
if DISABLE_SPLIT:
pytest.skip()
device = "cuda"
# set seed
torch.random.manual_seed(1)
batch_size = 5
nheads = 16
# batch_size = 1
# nheads = 1
out_partial = torch.randn(num_splits * 2, batch_size, nheads, seqlen, d, device=device, dtype=torch.float32).transpose(2, 3)[:num_splits] # To test non-contiguous tensor
lse_partial = torch.randn(num_splits, batch_size, nheads * 2, seqlen, device=device, dtype=torch.float32).transpose(-1, -2)[:, :, :, :nheads] # To test non-contiguous tensor
# To test short-circuiting based on num_splits
lse_partial[num_splits // 2:, :batch_size // 3] = -float("inf")
out, lse = flash_attn_combine(out_partial, lse_partial, out_dtype=dtype)
out_ref, lse_ref = attention_combine_ref(out_partial, lse_partial)
out_pt = out_ref.to(dtype)
print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
print(f"LSE mean diff: {(lse - lse_ref).abs().mean().item()}")
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
# breakpoint()
assert torch.allclose(lse, lse_ref, atol=1e-5, rtol=1e-5)
multiple = 2
assert ((out - out_ref).abs().max().item() <= multiple * (out_pt - out_ref).abs().max().item()) or torch.allclose(out, out_pt, atol=1e-5, rtol=1e-5)
# from flash_attn.utils.benchmark import pytorch_profiler
# # pytorch_profiler(torch.sum, lse_partial)
# pytorch_profiler(flash_attn_combine, out_partial, lse_partial)
# pytorch_profiler(torch.sum, out_partial)
@pytest.mark.skip(reason="AMD Triton backend doesn't use torch ops registration")
def test_flash3_bw_compatibility() -> None:
    """Pin the registered torch op schemas against frozen baseline strings."""
    # Let's try to always stay backward compatible! This will make life easier
    # for downstream libraries, users, and exported models.
    # 1/ Instead of removing arguments, error out if their value is no longer supported
    # 2/ When adding arguments, add them at the end with a default value
    # Forward kernel schema baseline.
    assert torch.ops.flash_attn_3.fwd.default._schema.is_backward_compatible_with(parse_schema(
        "flash_attn_3::fwd(Tensor q, Tensor k, Tensor v, Tensor(k_new!)? k_new=None, "
        "Tensor(v_new!)? v_new=None, Tensor? q_v=None, Tensor(out!)? out=None, "
        "Tensor? cu_seqlens_q=None, Tensor? cu_seqlens_k=None, "
        "Tensor? cu_seqlens_k_new=None, Tensor? seqused_q=None, Tensor? seqused_k=None, "
        "int? max_seqlen_q=None, int? max_seqlen_k=None, Tensor? page_table=None, "
        "Tensor? kv_batch_idx=None, Tensor? leftpad_k=None, Tensor? rotary_cos=None, Tensor? rotary_sin=None, "
        "Tensor? seqlens_rotary=None, Tensor? q_descale=None, Tensor? k_descale=None, Tensor? v_descale=None, "
        "float? softmax_scale=None, bool is_causal=False, int window_size_left=-1, int window_size_right=-1, "
        "int attention_chunk=0, float softcap=0., bool is_rotary_interleaved=False, "
        "Tensor? scheduler_metadata=None, int num_splits=0, bool? pack_gqa=None, int sm_margin=0) "
        "-> (Tensor(out!), Tensor, Tensor, Tensor)"
    ))
    # Backward kernel schema baseline.
    assert torch.ops.flash_attn_3.bwd.default._schema.is_backward_compatible_with(parse_schema(
        "flash_attn_3::bwd(Tensor dout, Tensor q, Tensor k, Tensor v, Tensor out, Tensor softmax_lse, "
        "Tensor(dq!)? dq=None, Tensor(dk!)? dk=None, Tensor(dv!)? dv=None, Tensor? cu_seqlens_q=None, "
        "Tensor? cu_seqlens_k=None, Tensor? seqused_q=None, Tensor? seqused_k=None, int? max_seqlen_q=None, "
        "int? max_seqlen_k=None, float? softmax_scale=None, bool is_causal=False, int window_size_left=-1, "
        "int window_size_right=-1, float softcap=0., bool deterministic=False, int sm_margin=0) "
        "-> (Tensor(dq!), Tensor(dk!), Tensor(dv!), Tensor, Tensor, Tensor, Tensor, Tensor)"
    ))
    # Split-KV combine kernel schema baseline.
    assert torch.ops.flash_attn_3.fwd_combine.default._schema.is_backward_compatible_with(parse_schema(
        "flash_attn_3::fwd_combine(Tensor out_partial, Tensor lse_partial, Tensor(out!)? out=None, "
        "ScalarType? out_dtype=None) -> (Tensor(out!), Tensor)"
    ))
    # Scheduler-metadata helper schema baseline.
    assert torch.ops.flash_attn_3.get_scheduler_metadata.default._schema.is_backward_compatible_with(parse_schema(
        "flash_attn_3::get_scheduler_metadata(int batch_size, int max_seqlen_q, int max_seqlen_k, "
        "int num_heads, int num_heads_k, int headdim, int headdim_v, ScalarType qkv_dtype, Tensor seqused_k, "
        "Tensor? cu_seqlens_q=None, Tensor? cu_seqlens_k=None, Tensor? cu_seqlens_k_new=None, "
        "Tensor? seqused_q=None, Tensor? leftpad_k=None, int? page_size=None, int max_seqlen_k_new=0, "
        "bool is_causal=False, int window_size_left=-1, int window_size_right=-1, "
        "int attention_chunk=0, bool has_softcap=False, int num_splits=0, bool? pack_gqa=None, "
        "int sm_margin=0) -> Tensor"
    ))
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "hopper/test_flash_attn_triton_amd.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:flash_attn/cute/cute_dsl_ptxas.py | """
System ptxas replacement for CUTLASS DSL.
Environment variables:
CUTE_DSL_PTXAS_PATH - Path to ptxas (e.g., /usr/local/cuda/bin/ptxas)
CUTE_DSL_PTXAS_VERBOSE - Set to 1 for verbose output
"""
import os
import sys
import re
import ctypes
import subprocess
from pathlib import Path
import cutlass
# Path to the system ptxas binary; patch() requires this to be set and executable.
CUTE_DSL_PTXAS_PATH = os.environ.get("CUTE_DSL_PTXAS_PATH", None)
# Verbose-diagnostics toggle read by _log().
VERBOSE = os.environ.get("CUTE_DSL_PTXAS_VERBOSE", "0") == "1"
# Original CudaDialectJitCompiledFunction._load_cuda_library, saved by patch()
# so the hook can fall back to it on any failure.
_original_load_cuda_library = None
_user_wanted_ptx = False  # True if user originally set CUTE_DSL_KEEP_PTX=1
def _log(msg):
    """Emit a diagnostic line to stderr when verbose mode is enabled."""
    if not VERBOSE:
        return
    print(f"[ptxas] {msg}", file=sys.stderr)
def _get_ptx(compiled_func) -> tuple[str, Path] | None:
    """Locate the dumped PTX file for *compiled_func*.

    Searches CUTE_DSL_DUMP_DIR (default: cwd) for ``*<function_name>*.ptx``
    and returns ``(content, path)`` with trailing NUL padding stripped, or
    ``None`` when no complete PTX module is found.
    """
    name = getattr(compiled_func, "function_name", None)
    if not name:
        return None
    search_dir = Path(os.environ.get("CUTE_DSL_DUMP_DIR", Path.cwd()))
    for candidate in search_dir.glob(f"*{name}*.ptx"):
        text = candidate.read_text().rstrip("\x00")
        # Accept only a complete module: a kernel entry plus a closing brace.
        if ".entry " in text and text.rstrip().endswith("}"):
            _log(f"Found PTX: {candidate}")
            return text, candidate
    return None
def _compile_ptx(ptx_path: Path, ptx_content: str) -> bytes:
    """Compile PTX to cubin using system ptxas.

    Args:
        ptx_path: path of the dumped .ptx file (may be rewritten in place
            when its on-disk content still carries NUL padding).
        ptx_content: file content with trailing NULs already stripped.

    Returns:
        The compiled cubin bytes.

    Raises:
        RuntimeError: if ptxas exits with a non-zero status.
    """
    # Extract arch from PTX
    match = re.search(r"\.target\s+(sm_\d+[a-z]?)", ptx_content)
    arch = match.group(1) if match else "sm_90a"
    # Write stripped content back if needed
    if ptx_path.read_text() != ptx_content:
        ptx_path.write_text(ptx_content)
    # Compile
    cubin_tmp = ptx_path.with_suffix(".cubin.tmp")
    try:
        assert CUTE_DSL_PTXAS_PATH is not None
        result = subprocess.run(
            [CUTE_DSL_PTXAS_PATH, f"-arch={arch}", "-O3", "-o", str(cubin_tmp), str(ptx_path)],
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            raise RuntimeError(f"ptxas failed: {result.stderr}")
        cubin_data = cubin_tmp.read_bytes()
        _log(f"Compiled {ptx_path.name} -> {len(cubin_data)} bytes ({arch})")
        # Save cubin if CUTE_DSL_KEEP_CUBIN is set
        if os.environ.get("CUTE_DSL_KEEP_CUBIN", "0") == "1":
            cubin_out = ptx_path.with_suffix(".cubin")
            cubin_out.write_bytes(cubin_data)
            _log(f"Saved: {cubin_out}")
        return cubin_data
    finally:
        # Always remove the temp file; the cubin bytes are already in memory.
        cubin_tmp.unlink(missing_ok=True)
def _patched_load_cuda_library(self):
    """Replacement for _load_cuda_library that uses system ptxas.

    Any failure at any stage (PTX lookup, compilation, library load, device
    registration) falls back to the original embedded-ptxas loader, so the
    patch can only add a compilation path, never remove one.
    """
    result = _get_ptx(self)
    if not result:
        _log("PTX not found, falling back to embedded ptxas")
        return _original_load_cuda_library(self)
    ptx_content, ptx_path = result
    try:
        cubin = _compile_ptx(ptx_path, ptx_content)
    except Exception as e:
        _log(f"Compilation failed ({e}), falling back to embedded ptxas")
        return _original_load_cuda_library(self)
    # Load cubin
    import cuda.bindings.runtime as cuda_runtime
    err, library = cuda_runtime.cudaLibraryLoadData(cubin, None, None, 0, None, None, 0)
    if err != cuda_runtime.cudaError_t.cudaSuccess:
        _log(f"cudaLibraryLoadData failed ({err}), falling back to embedded ptxas")
        return _original_load_cuda_library(self)
    # Register kernels on all devices
    _, cuda_load_to_device = self._get_cuda_init_and_load()
    # Pack pointers to (library handle, device id, error flag) as an array of
    # void*; dev_id/err_val are mutated in place across loop iterations.
    lib_ptr = ctypes.c_void_p(int(library))
    dev_id = ctypes.c_int32(0)
    err_val = ctypes.c_int32(0)
    args = (ctypes.c_void_p * 3)(
        ctypes.cast(ctypes.pointer(lib_ptr), ctypes.c_void_p),
        ctypes.cast(ctypes.pointer(dev_id), ctypes.c_void_p),
        ctypes.cast(ctypes.pointer(err_val), ctypes.c_void_p),
    )
    for dev in range(self.num_devices):
        dev_id.value = dev
        cuda_load_to_device(args)
    if err_val.value != 0:
        _log("cuda_load_to_device failed, falling back to embedded ptxas")
        return _original_load_cuda_library(self)
    _log(f"Loaded kernel from {ptx_path.name}")
    # Delete PTX if user didn't originally want it kept
    if not _user_wanted_ptx:
        ptx_path.unlink(missing_ok=True)
    return [cuda_runtime.cudaLibrary_t(lib_ptr.value)]
def patch():
    """Install system ptxas hook. Call before importing cutlass.

    Preconditions: CUTE_DSL_PTXAS_PATH must point to an executable ptxas and
    CUTE_DSL_KEEP_PTX=1 must be set (the hook recompiles from dumped PTX).

    Raises:
        RuntimeError: if the configured ptxas is missing or not executable.
        AssertionError: if CUTE_DSL_PTXAS_PATH is unset or KEEP_PTX != 1.
    """
    global _original_load_cuda_library, _user_wanted_ptx
    assert CUTE_DSL_PTXAS_PATH is not None
    if not os.path.isfile(CUTE_DSL_PTXAS_PATH) or not os.access(CUTE_DSL_PTXAS_PATH, os.X_OK):
        raise RuntimeError(f"ptxas not found: {CUTE_DSL_PTXAS_PATH}")
    # Track if user originally wanted PTX kept
    _user_wanted_ptx = os.environ.get("CUTE_DSL_KEEP_PTX", "0") == "1"
    # os.environ['CUTE_DSL_KEEP_PTX'] = '1'
    assert os.environ.get("CUTE_DSL_KEEP_PTX", "0") == "1", (
        "Require CUTE_DSL_KEEP_PTX=1 to use system's ptxas"
    )
    cls = cutlass.cutlass_dsl.cuda_jit_executor.CudaDialectJitCompiledFunction
    # Save the original loader so the hook can fall back to it on failure.
    _original_load_cuda_library = cls._load_cuda_library
    cls._load_cuda_library = _patched_load_cuda_library
    _log("Patch applied")
    return
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/cute_dsl_ptxas.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:tests/cute/score_mod_definitions.py | import torch
import cutlass
import cutlass.cute as cute
from cutlass._mlir.dialects import math as mlir_math
import operator
# =============================================================================
# Score_mod functions that don't use global indices
# All use signature: (tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors)
# =============================================================================
@cute.jit
def score_mod_identity(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """No-op score mod: return the attention scores unchanged."""
    return tSrS_ssa
@cute.jit
def score_mod_identity_vectorized(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Vectorized no-op: identity needs no per-lane indexing."""
    return tSrS_ssa
@cute.jit
def score_mod_causal(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Causal mask: keep score where q_idx >= kv_idx, else -inf."""
    mask = operator.ge(q_idx, kv_idx)
    return cute.where(mask, tSrS_ssa, cute.full_like(tSrS_ssa, float("-inf")))
@cute.jit
def score_mod_causal_vectorized(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Causal mask built element-by-element from the first (q, kv) indices.

    NOTE(review): assumes kv indices are consecutive starting at kv_idx[0]
    and q_idx is uniform across the vector -- confirm against kernel tiling.
    """
    mask = cute.make_rmem_tensor(kv_idx.shape, dtype=cutlass.Boolean)
    kv_idx0 = kv_idx[0]
    q_idx0 = q_idx[0]
    for i in cutlass.range_constexpr(cute.size(mask.shape)):
        mask[i] = q_idx0 >= kv_idx0 + i
    mask_ssa = mask.load()
    return cute.where(mask_ssa, tSrS_ssa, cute.full_like(tSrS_ssa, float("-inf")))
@cute.jit
def score_mod_rel_bias(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Add |q_idx - kv_idx| as a relative-position bias."""
    diff = q_idx - kv_idx
    # mlir_math.absi is integer abs; wrap the raw MLIR value back into a TensorSSA.
    abs_diff = cute.TensorSSA(mlir_math.absi(diff), diff.shape, diff.dtype)
    return tSrS_ssa + abs_diff.to(cutlass.Float32)
@cute.jit
def score_mod_rel_bias_vectorized(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Vectorized |q - kv| bias, built element-by-element from lane-0 indices.

    NOTE(review): assumes kv indices are consecutive from kv_idx[0] -- confirm.
    """
    q_idx0 = q_idx[0]
    kv_idx0 = kv_idx[0]
    diff0 = q_idx0 - kv_idx0
    abs_diff = cute.make_rmem_tensor(kv_idx.shape, dtype=diff0.dtype)
    for i in cutlass.range_constexpr(cute.size(kv_idx.shape)):
        diffi = diff0 - i
        abs_diff[i] = mlir_math.absi(diffi)
    return tSrS_ssa + abs_diff.load().to(cutlass.Float32)
@cute.jit
def score_mod_rel_bias_x2(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Add 2 * |q_idx - kv_idx| as a relative-position bias."""
    diff = q_idx - kv_idx
    abs_diff = cute.TensorSSA(mlir_math.absi(diff), diff.shape, diff.dtype)
    scaled = abs_diff * cute.full_like(abs_diff, 2)
    return tSrS_ssa + scaled.to(cutlass.Float32)
@cute.jit
def score_mod_rel_bias_x2_vectorized(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Vectorized 2 * |q - kv| bias (element-by-element, like score_mod_rel_bias_vectorized)."""
    q_idx0 = q_idx[0]
    kv_idx0 = kv_idx[0]
    diff0 = q_idx0 - kv_idx0
    abs_diff_x2 = cute.make_rmem_tensor(kv_idx.shape, dtype=diff0.dtype)
    for i in cutlass.range_constexpr(cute.size(kv_idx.shape)):
        diffi = diff0 - i
        abs_diff_x2[i] = mlir_math.absi(diffi) * 2
    return tSrS_ssa + abs_diff_x2.load().to(cutlass.Float32)
@cute.jit
def score_mod_times_two(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Scale every score by 2."""
    return tSrS_ssa * cute.full_like(tSrS_ssa, 2)
# Index-free, so the scalar version is already its own vectorized variant.
score_mod_times_two_vectorized = score_mod_times_two
@cute.jit
def score_mod_alibi(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """ALiBi-style bias: score - 2^-(h+1) * |q - kv|."""
    score = tSrS_ssa.to(cutlass.Float32)
    slope_exp = (h_idx + cute.full_like(h_idx, 1)) * cute.full_like(h_idx, -8)
    # 0.6931... * 1.4426... = ln(2) * log2(e) = 1, so the factor is just 0.125:
    # slope = exp2(-8(h+1) * 0.125) = 2^-(h+1), matching alibi_eager below.
    slope = cute.math.exp2(
        slope_exp.to(cutlass.Float32)
        * cute.full_like(score, 0.125 * 0.6931471805599453 * 1.4426950408889634)
    )
    diff = q_idx - kv_idx
    abs_diff = cute.TensorSSA(mlir_math.absi(diff), diff.shape, diff.dtype).to(cutlass.Float32)
    return score - slope * abs_diff
@cute.jit
def score_mod_alibi_vectorized(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Vectorized ALiBi: same slope, |q - kv| built element-by-element."""
    score = tSrS_ssa.to(cutlass.Float32)
    slope_exp = (h_idx + cute.full_like(h_idx, 1)) * cute.full_like(h_idx, -8)
    slope = cute.math.exp2(
        slope_exp.to(cutlass.Float32)
        * cute.full_like(score, 0.125 * 0.6931471805599453 * 1.4426950408889634)
    )
    diff0 = q_idx[0] - kv_idx[0]
    abs_diff = cute.make_rmem_tensor(kv_idx.shape, diff0.dtype)
    for i in cutlass.range_constexpr(cute.size(abs_diff.shape)):
        diffi = diff0 - i
        abs_diff[i] = mlir_math.absi(diffi)
    return score - slope * abs_diff.load().to(cutlass.Float32)
@cute.jit
def score_mod_sliding_window(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Mask positions more than 256 tokens apart (symmetric window)."""
    diff = q_idx - kv_idx
    abs_diff = cute.TensorSSA(mlir_math.absi(diff), diff.shape, diff.dtype)
    mask = operator.le(abs_diff, cute.full_like(abs_diff, 256))
    return cute.where(mask, tSrS_ssa, cute.full_like(tSrS_ssa, float("-inf")))
@cute.jit
def score_mod_block_diagonal(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Attend only within the same 64-token block."""
    q_block = q_idx // 64
    kv_block = kv_idx // 64
    mask = operator.eq(q_block, kv_block)
    return cute.where(mask, tSrS_ssa, cute.full_like(tSrS_ssa, float("-inf")))
@cute.jit
def score_mod_causal_v2(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Causal mask via the sign of q - kv (same result as score_mod_causal)."""
    diff = q_idx - kv_idx
    mask = operator.ge(diff, cute.full_like(diff, 0))
    return cute.where(mask, tSrS_ssa, cute.full_like(tSrS_ssa, float("-inf")))
@cute.jit
def score_mod_batch_bias(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Add a per-batch bias read from aux_tensors[0][b]."""
    batch_bias = aux_tensors[0]
    dtype = batch_bias.element_type
    # Round-trip b_idx through a fragment to obtain a scalar index for the gather.
    b_frag = cute.make_fragment(1, cutlass.Int32)
    b_frag.store(b_idx)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = batch_bias[b_frag[0]]
    bias_val = (bias_frag.load()).to(cutlass.Float32)
    return tSrS_ssa + bias_val
@cute.jit
def score_mod_batch_bias_vectorized(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Vectorized per-batch bias: reads the bias once via element 0 of b_idx.

    NOTE(review): assumes b_idx is uniform across the vector -- confirm.
    """
    batch_bias = aux_tensors[0]
    dtype = batch_bias.element_type
    b_idx0 = b_idx[0]
    bias_frag = cute.make_rmem_tensor(1, dtype)
    bias_frag[0] = batch_bias[b_idx0]
    bias_val = (bias_frag.load()).to(cutlass.Float32)
    return tSrS_ssa + bias_val
@cute.jit
def score_mod_dual_buffer(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Add per-head (aux[0][h]) and per-query-position (aux[1][q]) biases."""
    head_bias = aux_tensors[0]
    pos_bias = aux_tensors[1]
    dtype = head_bias.element_type
    h_frag = cute.make_fragment(1, cutlass.Int32)
    h_frag.store(h_idx)
    head_val_frag = cute.make_fragment(1, dtype)
    head_val_frag[0] = head_bias[h_frag[0]]
    head_val = (head_val_frag.load()).to(cutlass.Float32)
    q_frag = cute.make_fragment(1, cutlass.Int32)
    q_frag.store(q_idx)
    pos_val_frag = cute.make_fragment(1, dtype)
    pos_val_frag[0] = pos_bias[q_frag[0]]
    pos_val = (pos_val_frag.load()).to(cutlass.Float32)
    return tSrS_ssa + head_val + pos_val
@cute.jit
def score_mod_dual_buffer_vectorized(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Vectorized dual-buffer bias, reading via element 0 of h_idx / q_idx.

    NOTE(review): assumes h_idx and q_idx are uniform across the vector -- confirm.
    """
    head_bias = aux_tensors[0]
    pos_bias = aux_tensors[1]
    dtype = head_bias.element_type
    head_val_frag = cute.make_fragment(1, dtype)
    head_val_frag[0] = head_bias[h_idx[0]]
    head_val = (head_val_frag.load()).to(cutlass.Float32)
    pos_val_frag = cute.make_fragment(1, dtype)
    pos_val_frag[0] = pos_bias[q_idx[0]]
    pos_val = (pos_val_frag.load()).to(cutlass.Float32)
    return tSrS_ssa + head_val + pos_val
# =============================================================================
# Score_mod functions that use global indices
# All use signature: (tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors)
# Global indices computed as: q_idx_global = q_idx + seqlen_info.offset_q (and similarly for kv)
# =============================================================================
@cute.jit
def score_mod_global_kv_bias(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Per-token bias using global kv index."""
    # Global index = logical index + this sequence's packed offset.
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[kv_frag[0]]
    return tSrS_ssa + (bias_frag.load()).to(cutlass.Float32)
@cute.jit
def score_mod_global_q_bias(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Per-token bias using global q index."""
    offset_q = seqlen_info.offset_q
    q_idx_global = q_idx + offset_q
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    q_frag = cute.make_fragment(1, cutlass.Int32)
    q_frag.store(q_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[q_frag[0]]
    return tSrS_ssa + (bias_frag.load()).to(cutlass.Float32)
@cute.jit
def score_mod_global_rel_plus_kv_bias(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Relative position (logical) + per-token bias (global kv)."""
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    rel_pos = q_idx - kv_idx
    rel_pos_abs = cute.TensorSSA(mlir_math.absi(rel_pos), rel_pos.shape, rel_pos.dtype)
    # 0.1 scale matches packed_rel_plus_kv_bias_factory's eager reference.
    rel_bias = rel_pos_abs.to(cutlass.Float32) * cute.full_like(tSrS_ssa, 0.1)
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[kv_frag[0]]
    return tSrS_ssa + rel_bias + (bias_frag.load()).to(cutlass.Float32)
@cute.jit
def score_mod_global_q_and_kv_bias(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Both q and kv global indices."""
    offset_q = seqlen_info.offset_q
    q_idx_global = q_idx + offset_q
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    q_bias = aux_tensors[0]
    kv_bias = aux_tensors[1]
    dtype = q_bias.element_type
    q_frag = cute.make_fragment(1, cutlass.Int32)
    q_frag.store(q_idx_global)
    q_bias_frag = cute.make_fragment(1, dtype)
    q_bias_frag[0] = q_bias[q_frag[0]]
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    kv_bias_frag = cute.make_fragment(1, dtype)
    kv_bias_frag[0] = kv_bias[kv_frag[0]]
    return (
        tSrS_ssa
        + (q_bias_frag.load()).to(cutlass.Float32)
        + (kv_bias_frag.load()).to(cutlass.Float32)
    )
@cute.jit
def score_mod_global_logical_rel_plus_kv_bias(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Logical relative + global-indexed per-token bias."""
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    rel_pos = q_idx - kv_idx
    rel_pos_abs = cute.TensorSSA(mlir_math.absi(rel_pos), rel_pos.shape, rel_pos.dtype)
    rel_bias = rel_pos_abs.to(cutlass.Float32) * cute.full_like(tSrS_ssa, 0.01)
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[kv_frag[0]]
    return tSrS_ssa + rel_bias + (bias_frag.load()).to(cutlass.Float32)
# "Stress tests" - score_mods with complex global index usage
@cute.jit
def score_mod_stress_complex_arithmetic(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """All indices in complex arithmetic."""
    offset_q = seqlen_info.offset_q
    q_idx_global = q_idx + offset_q
    bias = aux_tensors[0]
    dtype = bias.element_type
    # Use absolute value instead of squaring to avoid overflow with large sequences
    rel_pos = q_idx - kv_idx
    rel_pos_abs = cute.TensorSSA(mlir_math.absi(rel_pos), rel_pos.shape, rel_pos.dtype)
    rel_bias = rel_pos_abs.to(cutlass.Float32) * cute.full_like(tSrS_ssa, 0.001)
    q_frag = cute.make_fragment(1, cutlass.Int32)
    q_frag.store(q_idx_global)
    bias_q_frag = cute.make_fragment(1, dtype)
    bias_q_frag[0] = bias[q_frag[0]]
    bias_q = (bias_q_frag.load()).to(cutlass.Float32)
    # Scale the looked-up bias by (b+1)(h+1)*0.001, mirroring
    # stress_complex_arithmetic_factory's eager reference.
    scale = (b_idx + cute.full_like(b_idx, 1)) * (h_idx + cute.full_like(h_idx, 1))
    scale_f32 = scale.to(cutlass.Float32) * 0.001
    result = tSrS_ssa + rel_bias + bias_q * scale_f32
    return result
@cute.jit
def score_mod_stress_conditional_mask(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Conditional masking with global vs logical."""
    offset_q = seqlen_info.offset_q
    q_idx_global = q_idx + offset_q
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[kv_frag[0]]
    bias_val = (bias_frag.load()).to(cutlass.Float32)
    # Causal test uses logical indices; the 512-token window uses global ones.
    is_causal = operator.ge(q_idx, kv_idx)
    global_diff = q_idx_global - kv_idx_global
    is_nearby = operator.le(
        cute.TensorSSA(mlir_math.absi(global_diff), global_diff.shape, global_diff.dtype),
        cute.full_like(global_diff, 512),
    )
    both_conditions = is_causal & is_nearby
    return cute.where(both_conditions, tSrS_ssa + bias_val, cute.full_like(tSrS_ssa, float("-inf")))
@cute.jit
def score_mod_stress_multi_buffer(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Multiple aux tensors with different indexing.

    aux[0]: per-batch bias, aux[1]: per-head scale, aux[2]/aux[3]: global
    q/kv position biases, aux[4]: clamped relative-position scale.
    NOTE(review): aux[4] is indexed in [0, 1024], so it needs >= 1025 entries.
    """
    offset_q = seqlen_info.offset_q
    q_idx_global = q_idx + offset_q
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    batch_bias = aux_tensors[0]
    head_scale = aux_tensors[1]
    q_pos_bias = aux_tensors[2]
    kv_pos_bias = aux_tensors[3]
    rel_pos_scale = aux_tensors[4]
    dtype = batch_bias.element_type
    b_frag = cute.make_fragment(1, cutlass.Int32)
    b_frag.store(b_idx)
    bb_frag = cute.make_fragment(1, dtype)
    bb_frag[0] = batch_bias[b_frag[0]]
    bb_val = (bb_frag.load()).to(cutlass.Float32)
    h_frag = cute.make_fragment(1, cutlass.Int32)
    h_frag.store(h_idx)
    hs_frag = cute.make_fragment(1, dtype)
    hs_frag[0] = head_scale[h_frag[0]]
    hs_val = (hs_frag.load()).to(cutlass.Float32)
    qg_frag = cute.make_fragment(1, cutlass.Int32)
    qg_frag.store(q_idx_global)
    qpb_frag = cute.make_fragment(1, dtype)
    qpb_frag[0] = q_pos_bias[qg_frag[0]]
    qpb_val = (qpb_frag.load()).to(cutlass.Float32)
    kvg_frag = cute.make_fragment(1, cutlass.Int32)
    kvg_frag.store(kv_idx_global)
    kvpb_frag = cute.make_fragment(1, dtype)
    kvpb_frag[0] = kv_pos_bias[kvg_frag[0]]
    kvpb_val = (kvpb_frag.load()).to(cutlass.Float32)
    # Shift relative position by 512, then clamp to [0, 1024] (two wheres).
    rel_idx = q_idx - kv_idx + cute.full_like(q_idx, 512)
    rel_idx_clamped = cute.where(
        operator.lt(rel_idx, cute.full_like(rel_idx, 0)), cute.full_like(rel_idx, 0), rel_idx
    )
    rel_idx_clamped = cute.where(
        operator.gt(rel_idx_clamped, cute.full_like(rel_idx_clamped, 1024)),
        cute.full_like(rel_idx_clamped, 1024),
        rel_idx_clamped,
    )
    ri_frag = cute.make_fragment(1, cutlass.Int32)
    ri_frag.store(rel_idx_clamped)
    rps_frag = cute.make_fragment(1, dtype)
    rps_frag[0] = rel_pos_scale[ri_frag[0]]
    rps_val = (rps_frag.load()).to(cutlass.Float32)
    return tSrS_ssa * hs_val + bb_val + qpb_val + kvpb_val + rps_val * cute.full_like(tSrS_ssa, 0.1)
@cute.jit
def score_mod_stress_global_offset(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Verify global - logical = offset."""
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[kv_frag[0]]
    return tSrS_ssa + (bias_frag.load()).to(cutlass.Float32)
@cute.jit
def score_mod_stress_xor_pattern(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """XOR-based pattern using index bits."""
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    token_bias = aux_tensors[0]
    dtype = token_bias.element_type
    # Low 8 bits of q XOR kv, scaled down to a small additive pattern.
    xor_logical = q_idx ^ kv_idx
    pattern_logical = xor_logical & cute.full_like(xor_logical, 0xFF)
    pattern_bias = pattern_logical.to(cutlass.Float32) * cute.full_like(tSrS_ssa, 0.001)
    kv_frag = cute.make_fragment(1, cutlass.Int32)
    kv_frag.store(kv_idx_global)
    bias_frag = cute.make_fragment(1, dtype)
    bias_frag[0] = token_bias[kv_frag[0]]
    return (
        tSrS_ssa
        + pattern_bias
        + (bias_frag.load()).to(cutlass.Float32) * cute.full_like(tSrS_ssa, 0.1)
    )
@cute.jit
def score_mod_debug_global_idx(
    tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors
):
    """Debug helper: add 0.001 * global kv index as the bias."""
    # Don't read from aux_tensors at all - just add the global index as bias
    offset_k = seqlen_info.offset_k
    kv_idx_global = kv_idx + offset_k
    bias = kv_idx_global.to(cutlass.Float32) * cute.full_like(tSrS_ssa, 0.001)
    return tSrS_ssa + bias
# =============================================================================
# Eager reference functions
# =============================================================================
def identity_eager(score, b, h, q_idx, kv_idx):
    """Reference: leave scores untouched."""
    return score
def causal_eager(score, b, h, q_idx, kv_idx):
    """Reference causal mask: -inf wherever the key is ahead of the query."""
    visible = q_idx >= kv_idx
    return torch.where(visible, score, float("-inf"))
def rel_bias_eager(score, b, h, q_idx, kv_idx):
    """Reference: add |q - kv| positional bias."""
    distance = torch.abs(q_idx - kv_idx)
    return score + distance
def rel_bias_x2_eager(score, b, h, q_idx, kv_idx):
    """Reference: add twice the |q - kv| positional bias."""
    distance = torch.abs(q_idx - kv_idx)
    return score + 2 * distance
def times_two_eager(score, b, h, q_idx, kv_idx):
    """Reference: scale every score by 2."""
    return score * 2
def alibi_eager(score, b, h, q_idx, kv_idx):
    """Reference ALiBi: subtract the per-head slope 2^-(h+1) times |q - kv|."""
    slope = 2 ** (-8 * (h + 1) / 8)
    distance = torch.abs(q_idx - kv_idx)
    return score - slope * distance
def sliding_window_eager(score, b, h, q_idx, kv_idx):
    """Reference: mask out positions farther than 256 tokens apart."""
    inside = torch.abs(q_idx - kv_idx) <= 256
    return torch.where(inside, score, float("-inf"))
def block_diagonal_eager(score, b, h, q_idx, kv_idx):
    """Reference: attend only within the same 64-token block."""
    same_block = (q_idx // 64) == (kv_idx // 64)
    return torch.where(same_block, score, float("-inf"))
def causal_v2_eager(score, b, h, q_idx, kv_idx):
    """Reference causal mask expressed via the sign of q - kv."""
    nonneg = (q_idx - kv_idx) >= 0
    return torch.where(nonneg, score, float("-inf"))
def batch_bias_factory(bias_tensor):
    """Build an eager score_mod that adds bias_tensor[b] to every score."""
    def apply_bias(score, b, h, q_idx, kv_idx):
        return bias_tensor[b] + score
    return apply_bias
def dual_buffer_factory(head_bias, pos_bias):
    """Build an eager score_mod adding per-head and per-query-position biases."""
    def apply_bias(score, b, h, q_idx, kv_idx):
        return score + head_bias[h] + pos_bias[q_idx]
    return apply_bias
def packed_kv_bias_factory(bias_tensor, cu_seqlens_k):
    """Per-key-token bias for the packed (varlen) layout.

    bias_tensor is indexed globally (cu_seqlens_k[b] + kv_idx); kv_idx is
    clamped to the sequence's valid length so padded lanes still read a
    valid entry.
    """
    def apply(score, b, h, q_idx, kv_idx):
        begin = cu_seqlens_k[b]
        valid_len = cu_seqlens_k[b + 1] - begin
        kv_safe = torch.clamp(kv_idx, max=valid_len - 1)
        return score + bias_tensor[begin + kv_safe]
    return apply
def packed_q_bias_factory(bias_tensor, cu_seqlens_q):
    """Per-query-token bias for the packed layout (globally indexed, clamped)."""
    def apply(score, b, h, q_idx, kv_idx):
        begin = cu_seqlens_q[b]
        valid_len = cu_seqlens_q[b + 1] - begin
        q_safe = torch.clamp(q_idx, max=valid_len - 1)
        return score + bias_tensor[begin + q_safe]
    return apply
def packed_rel_plus_kv_bias_factory(bias_tensor, cu_seqlens_k):
    """0.1 * |q - kv| relative bias plus a clamped, globally-indexed kv bias."""
    def apply(score, b, h, q_idx, kv_idx):
        begin = cu_seqlens_k[b]
        valid_len = cu_seqlens_k[b + 1] - begin
        kv_safe = torch.clamp(kv_idx, max=valid_len - 1)
        distance = torch.abs(q_idx - kv_idx).float()
        return score + distance * 0.1 + bias_tensor[begin + kv_safe]
    return apply
def packed_q_and_kv_bias_factory(q_bias, kv_bias, cu_seqlens_q, cu_seqlens_k):
    """Sum of globally-indexed q and kv biases, each clamped to its own length."""
    def apply(score, b, h, q_idx, kv_idx):
        q_begin = cu_seqlens_q[b]
        q_safe = torch.clamp(q_idx, max=cu_seqlens_q[b + 1] - q_begin - 1)
        kv_begin = cu_seqlens_k[b]
        kv_safe = torch.clamp(kv_idx, max=cu_seqlens_k[b + 1] - kv_begin - 1)
        return score + q_bias[q_begin + q_safe] + kv_bias[kv_begin + kv_safe]
    return apply
def packed_logical_rel_plus_kv_bias_factory(bias_tensor, cu_seqlens_k):
    """0.01 * |q - kv| (logical) plus an unclamped globally-indexed kv bias."""
    def apply(score, b, h, q_idx, kv_idx):
        distance = torch.abs(q_idx - kv_idx).float()
        return score + distance * 0.01 + bias_tensor[cu_seqlens_k[b] + kv_idx]
    return apply
def stress_complex_arithmetic_factory(bias, cu_seqlens_q):
    """Relative-distance bias plus a globally-indexed q bias scaled by (b+1)(h+1)*0.001."""
    def apply(score, b, h, q_idx, kv_idx):
        # Use absolute value instead of squaring to avoid overflow with large sequences
        distance = torch.abs(q_idx - kv_idx)
        q_global = cu_seqlens_q[b] + q_idx
        per_q = bias[q_global]
        strength = (b + 1) * (h + 1) * 0.001
        return score + distance * 0.001 + per_q * strength
    return apply
def stress_conditional_mask_factory(token_bias, cu_seqlens_q, cu_seqlens_k):
    """Causal (logical indices) AND within-512 (global indices), plus a global kv bias."""
    def apply(score, b, h, q_idx, kv_idx):
        kv_global = cu_seqlens_k[b] + kv_idx
        q_global = cu_seqlens_q[b] + q_idx
        keep = (q_idx >= kv_idx) & (torch.abs(q_global - kv_global) <= 512)
        return torch.where(keep, score + token_bias[kv_global], float("-inf"))
    return apply
def stress_multi_buffer_factory(
    batch_bias,
    head_scale,
    q_pos_bias,
    kv_pos_bias,
    rel_pos_scale,
    cu_seqlens_q,
    cu_seqlens_k,
    max_rel_pos=512,
):
    """Combine five aux buffers: per-batch, per-head, global q/kv position,
    and a clamped relative-position scale."""
    def apply(score, b, h, q_idx, kv_idx):
        per_batch = batch_bias[b]
        per_head = head_scale[h]
        q_bias = q_pos_bias[cu_seqlens_q[b] + q_idx]
        kv_bias = kv_pos_bias[cu_seqlens_k[b] + kv_idx]
        rel = (q_idx - kv_idx + max_rel_pos).clamp(0, max_rel_pos * 2)
        return score * per_head + per_batch + q_bias + kv_bias + rel_pos_scale[rel] * 0.1
    return apply
def stress_global_offset_factory(token_bias, cu_seqlens_k):
    """Per-key bias indexed by the global kv position."""
    def apply(score, b, h, q_idx, kv_idx):
        return score + token_bias[cu_seqlens_k[b] + kv_idx]
    return apply
def stress_xor_pattern_factory(token_bias, cu_seqlens_q, cu_seqlens_k):
    """Low-8-bit XOR pattern of the logical indices plus a scaled global kv bias."""
    def apply(score, b, h, q_idx, kv_idx):
        pattern = ((q_idx ^ kv_idx) & 0xFF).float() * 0.001
        kv_global = cu_seqlens_k[b] + kv_idx
        return score + pattern + token_bias[kv_global] * 0.1
    return apply
def debug_global_idx_factory(bias, cu_seqlens_k):
    """Ignore *bias*; add 0.001 * global kv index (offsets precomputed as ints)."""
    offsets = cu_seqlens_k.tolist()
    def apply(score, b, h, q_idx, kv_idx):
        return score + (offsets[b] + kv_idx).float() * 0.001
    return apply
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/score_mod_definitions.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 517,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:tests/cute/test_score_mod_varlen.py | import pytest
import torch
from torch.nn.attention.flex_attention import flex_attention
from flash_attn.cute.interface import _flash_attn_fwd
from test_score_mod import _generate_block_kvcache
from score_mod_definitions import (
# TensorSSA-based score mods
score_mod_alibi,
score_mod_batch_bias,
score_mod_block_diagonal,
score_mod_causal,
score_mod_causal_v2,
score_mod_debug_global_idx,
score_mod_dual_buffer,
score_mod_global_kv_bias,
score_mod_global_logical_rel_plus_kv_bias,
score_mod_global_q_and_kv_bias,
score_mod_global_q_bias,
score_mod_global_rel_plus_kv_bias,
score_mod_identity,
score_mod_rel_bias,
score_mod_rel_bias_x2,
score_mod_sliding_window,
score_mod_stress_complex_arithmetic,
score_mod_stress_conditional_mask,
score_mod_stress_global_offset,
score_mod_stress_multi_buffer,
score_mod_stress_xor_pattern,
score_mod_times_two,
) # isort: split
from score_mod_definitions import (
score_mod_identity_vectorized,
score_mod_causal_vectorized,
score_mod_rel_bias as score_mod_rel_bias_vectorized,
score_mod_rel_bias_x2_vectorized,
score_mod_times_two_vectorized,
score_mod_alibi_vectorized,
score_mod_batch_bias_vectorized,
score_mod_dual_buffer_vectorized,
) # isort: split
from score_mod_definitions import (
# Eager (torch) reference score mods
identity_eager,
causal_eager,
rel_bias_eager,
rel_bias_x2_eager,
times_two_eager,
alibi_eager,
sliding_window_eager,
block_diagonal_eager,
causal_v2_eager,
batch_bias_factory,
dual_buffer_factory,
packed_kv_bias_factory,
packed_q_bias_factory,
packed_rel_plus_kv_bias_factory,
packed_q_and_kv_bias_factory,
packed_logical_rel_plus_kv_bias_factory,
stress_complex_arithmetic_factory,
stress_conditional_mask_factory,
stress_multi_buffer_factory,
stress_global_offset_factory,
stress_xor_pattern_factory,
debug_global_idx_factory,
)
# Hopper (SM90) detection. NOTE(review): this queries the device at import
# time, so importing this module requires a visible CUDA device -- confirm
# that is acceptable for collection-only runs.
IS_SM90 = torch.cuda.get_device_capability()[0] == 9
# =============================================================================
# Test pairs
# =============================================================================
# (cute_score_mod, eager_factory_or_fn, aux_type)
# aux_type: None, "batch", "dual_buffer"
# All score_mods use 7-arg signature: (tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors)
TEST_PAIRS_NO_GLOBAL = [
    (score_mod_identity, identity_eager, None),
    (score_mod_causal, causal_eager, None),
    (score_mod_rel_bias, rel_bias_eager, None),
    (score_mod_rel_bias_x2, rel_bias_x2_eager, None),
    (score_mod_times_two, times_two_eager, None),
    (score_mod_alibi, alibi_eager, None),
    (score_mod_sliding_window, sliding_window_eager, None),
    (score_mod_block_diagonal, block_diagonal_eager, None),
    (score_mod_causal_v2, causal_v2_eager, None),
    (score_mod_batch_bias, batch_bias_factory, "batch"),
    (score_mod_dual_buffer, dual_buffer_factory, "dual_buffer"),
]
# Test pairs to compare vectorized score_mods: (cute_jit_function, cute_jit_function_vectorized)
TEST_PAIRS_VECTORIZED_NO_GLOBAL = [
    (score_mod_identity, score_mod_identity_vectorized, None),
    (score_mod_causal, score_mod_causal_vectorized, None),
    # NOTE(review): score_mod_rel_bias_vectorized is imported as an alias of
    # the scalar score_mod_rel_bias (see the import block), so this pair
    # compares the function against itself -- confirm whether a real
    # vectorized variant was intended here.
    (score_mod_rel_bias, score_mod_rel_bias_vectorized, None),
    (score_mod_rel_bias_x2, score_mod_rel_bias_x2_vectorized, None),
    (score_mod_times_two, score_mod_times_two_vectorized, None),
    (score_mod_alibi, score_mod_alibi_vectorized, None),
    (score_mod_batch_bias, score_mod_batch_bias_vectorized, "batch"),
    (score_mod_dual_buffer, score_mod_dual_buffer_vectorized, "dual_buffer"),
]
# (cute_score_mod, eager_factory, aux_type, requires_global)
# aux_type: "kv", "q", "q_and_kv", "q_concat", "kv_with_cu", "multi_buffer"
# requires_global: "q" (needs varlen_q), "kv" (needs varlen_k), "both" (needs both)
# All score_mods use 7-arg signature and compute global indices from seqlen_info
TEST_PAIRS_WITH_GLOBAL = [
    (score_mod_global_kv_bias, packed_kv_bias_factory, "kv", "kv"),
    (score_mod_global_q_bias, packed_q_bias_factory, "q", "q"),
    (score_mod_global_rel_plus_kv_bias, packed_rel_plus_kv_bias_factory, "kv", "kv"),
    (score_mod_global_q_and_kv_bias, packed_q_and_kv_bias_factory, "q_and_kv", "both"),
    (
        score_mod_global_logical_rel_plus_kv_bias,
        packed_logical_rel_plus_kv_bias_factory,
        "kv",
        "kv",
    ),
    (
        score_mod_stress_complex_arithmetic,
        stress_complex_arithmetic_factory,
        "q_concat",
        "q",
    ),
    (
        score_mod_stress_conditional_mask,
        stress_conditional_mask_factory,
        "kv_with_cu",
        "both",
    ),
    (
        score_mod_stress_multi_buffer,
        stress_multi_buffer_factory,
        "multi_buffer",
        "both",
    ),
    (score_mod_stress_global_offset, stress_global_offset_factory, "kv", "kv"),
    (score_mod_stress_xor_pattern, stress_xor_pattern_factory, "kv_with_cu", "kv"),
    (score_mod_debug_global_idx, debug_global_idx_factory, "kv", "kv"),
]
# (seqlens_q, seqlens_k) pairs swept by every test in this file.
SEQLEN_CONFIGS = [
    # Tiny / degenerate shapes
    ([1], [1]),
    ([1, 1], [1, 1]),
    ([2, 3], [2, 3]),
    ([8, 16], [8, 16]),
    ([32, 32], [32, 32]),
    ([64, 128], [64, 128]),
    ([64, 56, 128], [64, 56, 128]),
    ([256, 512], [256, 512]),
    # Odd, non-power-of-two and extreme-imbalance lengths
    ([113, 203], [113, 203]),
    ([239, 1], [239, 1]),
    ([64], [64]),
    ([128, 128], [128, 128]),
    ([32, 32, 32, 32], [32, 32, 32, 32]),
    ([16, 32, 64, 128, 256], [16, 32, 64, 128, 256]),
    ([1, 1024], [1, 1024]),
    ([1024, 1], [1024, 1]),
    ([1, 256, 1], [1, 256, 1]),
    ([256, 1, 256], [256, 1, 256]),
    ([17, 33, 65], [17, 33, 65]),
    # Q and K lengths differ
    ([64, 128], [32, 64]),
    ([100, 100], [50, 50]),
    ([256, 512, 256], [128, 256, 128]),
    # Decode-like shapes: tiny Q against very long K (kv-cache scenarios)
    ([2, 1], [16384, 32 * 1024]),
    ([1, 1], [128 * 1024] * 2),
    ([2, 1], [8192, 8192]),
    ([1, 3], [8192, 8192]),
    ([3, 3], [8192, 8192]),
    ([128, 128], [8192, 8192]),
    ([2, 2, 2], [8 * 1024] * 3),
    ([2, 1], [1024 * 32, 16384]),
    ([1, 2], [1024 * 32, 16384]),
    ([1, 1, 1], [128 * 1024] * 3),
    ([1, 1, 1], [256 * 1024] * 3),
]
# Vector widths exercised when comparing scalar vs vectorized score_mods.
VEC_SIZES_TO_CHECK_EQUALITY = [1, 4]
# =============================================================================
# Helper functions
# =============================================================================
def run_cute_flash(
    q,
    k,
    v,
    score_mod,
    aux_tensors=None,
    pack_gqa=False,
    cu_seqlens_q=None,
    cu_seqlens_k=None,
    page_table=None,
    seqused_k=None,
):
    """Run the CuTE flash-attention forward pass and return the output.

    Handles both layouts: when ``cu_seqlens_q``/``cu_seqlens_k`` are given,
    the packed (varlen) path of ``_flash_attn_fwd`` is exercised; otherwise
    the batched path. The output tensor mirrors ``q``'s shape/dtype/device.

    Args:
        q, k, v: query/key/value tensors (packed or batched layout).
        score_mod: CuTE score-mod callable forwarded to the kernel.
        aux_tensors: optional auxiliary buffers consumed by ``score_mod``.
        pack_gqa: whether the GQA head-packing path is enabled.
        cu_seqlens_q, cu_seqlens_k: cumulative sequence lengths for varlen.
        page_table: optional paged-KV block table.
        seqused_k: optional per-batch used K lengths (paged KV).
    """
    out = torch.empty_like(q)
    # The previous version duplicated this entire call in two branches that
    # differed only in whether the cu_seqlens kwargs were passed; forwarding
    # them unconditionally (they default to None) keeps one call site.
    _flash_attn_fwd(
        q,
        k,
        v,
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k,
        seqused_k=seqused_k,
        page_table=page_table,
        return_lse=True,
        score_mod=score_mod,
        out=out,
        lse=None,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
    )
    return out
def run_flex_varlen_ref(q, k, v, cu_seqlens_q, cu_seqlens_k, score_mod, dtype=None):
"""Run flex_attention per-sequence for varlen reference."""
if cu_seqlens_q is not None:
num_batches = len(cu_seqlens_q) - 1
else:
num_batches = len(cu_seqlens_k) - 1
results = []
for i in range(num_batches):
# Get Q slice
if cu_seqlens_q is not None:
q_slice = (
q[cu_seqlens_q[i] : cu_seqlens_q[i + 1]].unsqueeze(0).transpose(1, 2)
)
else:
q_slice = q[i : i + 1].transpose(1, 2)
# Get K/V slices
if cu_seqlens_k is not None:
k_slice = (
k[cu_seqlens_k[i] : cu_seqlens_k[i + 1]].unsqueeze(0).transpose(1, 2)
)
v_slice = (
v[cu_seqlens_k[i] : cu_seqlens_k[i + 1]].unsqueeze(0).transpose(1, 2)
)
else:
k_slice = k[i : i + 1].transpose(1, 2)
v_slice = v[i : i + 1].transpose(1, 2)
if dtype is not None:
q_slice, k_slice, v_slice = (
q_slice.to(dtype),
k_slice.to(dtype),
v_slice.to(dtype),
)
def wrapped_mod(score, b, h, q_idx, kv_idx):
return score_mod(score, i, h, q_idx, kv_idx)
out = flex_attention(
q_slice,
k_slice,
v_slice,
score_mod=wrapped_mod,
enable_gqa=q_slice.shape[1] != k_slice.shape[1],
)
results.append(out.transpose(1, 2).squeeze(0))
return torch.cat(results, dim=0)
def setup_tensors(
    seqlens_q, seqlens_k, varlen_q, varlen_k, num_heads, head_dim, dtype,
    device="cuda",
):
    """Create Q, K, V tensors and cu_seqlens based on varlen flags.

    A varlen side is packed along dim 0 (``sum(seqlens)``) with an int32
    cu_seqlens tensor; a non-varlen side is batched with every sequence at
    the length of the first entry, and its cu_seqlens is None.

    Args:
        seqlens_q, seqlens_k: per-sequence lengths.
        varlen_q, varlen_k: whether each side uses the packed layout.
        num_heads, head_dim, dtype: tensor geometry.
        device: target device (default "cuda"; parameterized so the helper
            also works on CPU, backward compatible with existing callers).

    Returns:
        (q, k, v, cu_seqlens_q, cu_seqlens_k)
    """

    def _cu_seqlens(lengths):
        # [0, l0, l0+l1, ...] as int32 on the target device.
        return torch.tensor(
            [0] + torch.tensor(lengths).cumsum(0).tolist(),
            device=device,
            dtype=torch.int32,
        )

    batch_size = len(seqlens_q)
    if varlen_q:
        total_q = sum(seqlens_q)
        q = torch.randn(total_q, num_heads, head_dim, device=device, dtype=dtype)
        cu_seqlens_q = _cu_seqlens(seqlens_q)
    else:
        seqlen_q = seqlens_q[0]  # All sequences have the same length for non-varlen
        q = torch.randn(
            batch_size, seqlen_q, num_heads, head_dim, device=device, dtype=dtype
        )
        cu_seqlens_q = None
    if varlen_k:
        total_k = sum(seqlens_k)
        k = torch.randn(total_k, num_heads, head_dim, device=device, dtype=dtype)
        v = torch.randn(total_k, num_heads, head_dim, device=device, dtype=dtype)
        cu_seqlens_k = _cu_seqlens(seqlens_k)
    else:
        seqlen_k = seqlens_k[0]  # All sequences have the same length for non-varlen
        k = torch.randn(
            batch_size, seqlen_k, num_heads, head_dim, device=device, dtype=dtype
        )
        v = torch.randn(
            batch_size, seqlen_k, num_heads, head_dim, device=device, dtype=dtype
        )
        cu_seqlens_k = None
    return q, k, v, cu_seqlens_q, cu_seqlens_k
def prepare_ref_tensors(
    q, k, v, cu_seqlens_q, cu_seqlens_k, varlen_q, varlen_k, batch_size, seqlens_q
):
    """Prepare tensors for flex_attention reference (handle mixed varlen formats).

    - batched Q + varlen K: pack Q along dim 0 and synthesize uniform
      cu_seqlens for it.
    - varlen Q + batched K: pass through but drop cu_seqlens_k.
    - both varlen: pass everything through unchanged.

    ``seqlens_q`` is unused; kept for call-site compatibility.

    Returns:
        (q_ref, k_ref, v_ref, ref_cu_seqlens_q, ref_cu_seqlens_k)
    """
    num_heads = q.shape[1] if varlen_q else q.shape[2]
    if not varlen_q and varlen_k:
        seqlen_q = q.shape[1]
        q_packed = q.reshape(-1, num_heads, q.shape[-1])
        # Uniform boundaries [0, s, 2s, ...]. Use q.device instead of a
        # hard-coded "cuda" so the helper also works on CPU tensors
        # (identical behavior in the CUDA test harness).
        ref_cu_seqlens_q = torch.tensor(
            [seqlen_q * i for i in range(batch_size + 1)],
            device=q.device,
            dtype=torch.int32,
        )
        return q_packed, k, v, ref_cu_seqlens_q, cu_seqlens_k
    if varlen_q and not varlen_k:
        return q, k, v, cu_seqlens_q, None
    return q, k, v, cu_seqlens_q, cu_seqlens_k
def check_results(
    out_cute,
    out_ref_fp32,
    out_pt,
    test_name,
    rtol=2,
    extra_atol=1e-4,
    seqlens_q=None,
    cu_seqlens_q=None,
):
    """Assert the CuTE output is finite and within tolerance of the FP32
    flex_attention reference, scaling the allowed error by how far the
    same-dtype PyTorch run deviates from that reference.
    """
    assert not torch.isnan(out_cute).any(), f"{test_name}: NaN in output"
    assert torch.isfinite(out_cute).all(), f"{test_name}: Inf in output"

    def _max_abs_err(a, b):
        return (a - b).abs().max().item()

    if cu_seqlens_q is not None:
        # Packed varlen output: compare sequence by sequence and keep the
        # worst-case error across sequences.
        assert seqlens_q is not None, "varlen_q requires use of seqlens_q"
        cute_error = 0.0
        pt_error = 0.0
        for seq in range(len(seqlens_q)):
            lo = cu_seqlens_q[seq]
            hi = cu_seqlens_q[seq + 1]
            cute_error = max(
                cute_error, _max_abs_err(out_cute[lo:hi], out_ref_fp32[lo:hi])
            )
            pt_error = max(pt_error, _max_abs_err(out_pt[lo:hi], out_ref_fp32[lo:hi]))
    else:
        # Batched output: direct whole-tensor comparison.
        pt_error = _max_abs_err(out_pt, out_ref_fp32)
        cute_error = _max_abs_err(out_cute, out_ref_fp32)
    # Numerical noise floor of the reference itself (round-trip through +0.3).
    fwd_atol = 2 * (out_ref_fp32 + 0.3 - 0.3 - out_ref_fp32).abs().max().item()
    print(f"\n{test_name}:")
    print(f" PyTorch vs FP32 ref: {pt_error:.2e}")
    print(f" CuTE vs FP32 ref: {cute_error:.2e}")
    tol = rtol * pt_error + fwd_atol + extra_atol
    assert cute_error <= tol, (
        f"{test_name}: CuTE error {cute_error:.2e} exceeds tolerance {tol:.2e}"
    )
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("varlen_q", [True, False])
@pytest.mark.parametrize("varlen_k", [True, False])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(4, 2)])
@pytest.mark.parametrize("seqlens_q,seqlens_k", SEQLEN_CONFIGS)
@pytest.mark.parametrize("score_mod_tuple", TEST_PAIRS_NO_GLOBAL)
def test_varlen_with_score_mod(
    seqlens_q,
    seqlens_k,
    varlen_q,
    varlen_k,
    qhead_per_kvhead,
    num_kv_heads,
    dtype,
    score_mod_tuple,
):
    """Test varlen attention with score_mod functions that don't use global indices.
    Covers: both varlen, varlen Q only, varlen K only.
    Skips: neither varlen
    """
    if not varlen_q and not varlen_k:
        pytest.skip(
            "At least one of varlen_q or varlen_k must be True for varlen tests"
        )
    # For non-varlen dimension, all sequences must have same length
    if not varlen_q:
        seqlens_q = [seqlens_q[0]] * len(seqlens_q)
    if not varlen_k:
        seqlens_k = [seqlens_k[0]] * len(seqlens_k)
    torch.random.manual_seed(42)
    cute_score_mod, eager_factory, aux_type = score_mod_tuple
    num_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    head_dim = 128
    batch_size = len(seqlens_q)
    q, k, v, cu_seqlens_q, cu_seqlens_k = setup_tensors(
        seqlens_q, seqlens_k, varlen_q, varlen_k, num_heads, head_dim, dtype
    )
    # GQA: keep only the first num_kv_heads heads of K/V; the head axis
    # position depends on whether K/V are packed (varlen) or batched.
    if pack_gqa:
        if varlen_k:
            k = k[:, :num_kv_heads, :].clone()
            v = v[:, :num_kv_heads, :].clone()
        else:
            k = k[:, :, :num_kv_heads, :].clone()
            v = v[:, :, :num_kv_heads, :].clone()
    aux_tensors = None
    if aux_type == "batch":
        # NOTE(review): zeros * 0.1 is always zero, so the batch-bias mod is
        # exercised with an all-zero bias here; torch.randn may have been
        # intended (cf. the global-idx tests) -- confirm.
        bias = torch.zeros(batch_size, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias)
    elif aux_type == "dual_buffer":
        seqlen_q = seqlens_q[0] if not varlen_q else max(seqlens_q)
        head_bias = torch.randn(num_heads, device="cuda", dtype=dtype) * 0.2
        pos_bias = torch.arange(seqlen_q, device="cuda", dtype=dtype) * 0.01
        aux_tensors = [head_bias, pos_bias]
        eager_score_mod = eager_factory(head_bias, pos_bias)
    else:
        eager_score_mod = eager_factory
    # Prepare reference tensors
    q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k = prepare_ref_tensors(
        q, k, v, cu_seqlens_q, cu_seqlens_k, varlen_q, varlen_k, batch_size, seqlens_q
    )
    # FP32 run is the ground truth; the same-dtype run calibrates tolerance.
    out_ref_fp32 = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=torch.float32
    )
    out_pt = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=dtype
    )
    out_cute = run_cute_flash(
        q,
        k,
        v,
        cute_score_mod,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k,
    )
    # Mixed layout (batched Q, varlen K): references come back packed, so
    # reshape them to the batched layout of the kernel output.
    if not varlen_q and varlen_k:
        seqlen_q = q.shape[1]
        out_ref_fp32 = out_ref_fp32.reshape(batch_size, seqlen_q, num_heads, head_dim)
        out_pt = out_pt.reshape(batch_size, seqlen_q, num_heads, head_dim)
    assert out_cute.shape == out_ref_fp32.shape, (
        f"Shape mismatch: {out_cute.shape} vs {out_ref_fp32.shape}"
    )
    test_name = f"{cute_score_mod.__name__} (varlen_q={varlen_q}, varlen_k={varlen_k})"
    extra_atol = 2e-3
    check_results(
        out_cute,
        out_ref_fp32,
        out_pt,
        test_name,
        extra_atol=extra_atol,
        seqlens_q=seqlens_q if varlen_q else None,
        cu_seqlens_q=cu_seqlens_q if varlen_q else None,
    )
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("varlen_q", [True, False])
@pytest.mark.parametrize("varlen_k", [True, False])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(4, 2)])
@pytest.mark.parametrize("seqlens_q,seqlens_k", SEQLEN_CONFIGS)
@pytest.mark.parametrize("score_mod_vec_tuple", TEST_PAIRS_VECTORIZED_NO_GLOBAL)
def test_varlen_with_score_mod_vectorized(
    seqlens_q,
    seqlens_k,
    varlen_q,
    varlen_k,
    qhead_per_kvhead,
    num_kv_heads,
    dtype,
    score_mod_vec_tuple,
):
    """Tests equality between original and vectorized versions of score mods"""
    if not varlen_q and not varlen_k:
        pytest.skip(
            "At least one of varlen_q or varlen_k must be True for varlen tests"
        )
    # For non-varlen dimension, all sequences must have same length
    if not varlen_q:
        seqlens_q = [seqlens_q[0]] * len(seqlens_q)
    if not varlen_k:
        seqlens_k = [seqlens_k[0]] * len(seqlens_k)
    torch.random.manual_seed(42)
    cute_score_mod, cute_vectorized_score_mod, aux_type = score_mod_vec_tuple
    num_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    head_dim = 128
    batch_size = len(seqlens_q)
    q, k, v, cu_seqlens_q, cu_seqlens_k = setup_tensors(
        seqlens_q, seqlens_k, varlen_q, varlen_k, num_heads, head_dim, dtype
    )
    aux_tensors = None
    if aux_type == "batch":
        # NOTE(review): zeros * 0.1 is always zero -- the batch bias is all
        # zeros here; torch.randn may have been intended. Confirm.
        bias = torch.zeros(batch_size, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [bias]
    elif aux_type == "dual_buffer":
        seqlen_q = seqlens_q[0] if not varlen_q else max(seqlens_q)
        head_bias = torch.randn(num_heads, device="cuda", dtype=dtype) * 0.2
        pos_bias = torch.arange(seqlen_q, device="cuda", dtype=dtype) * 0.01
        aux_tensors = [head_bias, pos_bias]
    # GQA: keep only the first num_kv_heads heads of K/V.
    if pack_gqa:
        if varlen_k:
            k = k[:, :num_kv_heads, :].clone()
            v = v[:, :num_kv_heads, :].clone()
        else:
            k = k[:, :, :num_kv_heads, :].clone()
            v = v[:, :, :num_kv_heads, :].clone()
    # Scalar (non-vectorized) score_mod output serves as the reference.
    out_ref = run_cute_flash(
        q,
        k,
        v,
        cute_score_mod,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k,
    )
    for vec_size in VEC_SIZES_TO_CHECK_EQUALITY:
        # NOTE(review): this mutates the imported function object and leaves
        # __vec_size__ at the last swept value after the test -- confirm this
        # cannot leak into other tests that share the module.
        cute_vectorized_score_mod.__vec_size__ = vec_size
        out = run_cute_flash(
            q,
            k,
            v,
            cute_vectorized_score_mod,
            aux_tensors=aux_tensors,
            pack_gqa=pack_gqa,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
        )
        # Vectorization must be bit-exact, not merely close.
        assert torch.equal(out, out_ref)
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("varlen_q", [True, False])
@pytest.mark.parametrize("varlen_k", [True, False])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 1), (4, 2)])
@pytest.mark.parametrize("seqlens_q,seqlens_k", SEQLEN_CONFIGS)
@pytest.mark.parametrize("score_mod_tuple", TEST_PAIRS_WITH_GLOBAL)
def test_varlen_with_global_idx_score_mod(
    seqlens_q,
    seqlens_k,
    varlen_q,
    varlen_k,
    qhead_per_kvhead,
    num_kv_heads,
    dtype,
    score_mod_tuple,
):
    """Test varlen attention with score_mod functions that use global indices.
    These score_mods compute q_idx_global and/or kv_idx_global from seqlen_info for packed tensor indexing.
    Skips tests where required global indices aren't available.
    """
    if not varlen_q and not varlen_k:
        pytest.skip(
            "At least one of varlen_q or varlen_k must be True for varlen tests"
        )
    cute_score_mod, eager_factory, aux_type, requires_global = score_mod_tuple
    # Skip if score_mod requires global indices we can't provide
    if requires_global == "q" and not varlen_q:
        pytest.skip(f"{cute_score_mod.__name__} requires varlen_q for q_idx_global")
    if requires_global == "kv" and not varlen_k:
        pytest.skip(f"{cute_score_mod.__name__} requires varlen_k for kv_idx_global")
    if requires_global == "both" and (not varlen_q or not varlen_k):
        pytest.skip(f"{cute_score_mod.__name__} requires both varlen_q and varlen_k")
    # For non-varlen dimension, all sequences must have same length
    if not varlen_q:
        seqlens_q = [seqlens_q[0]] * len(seqlens_q)
    if not varlen_k:
        seqlens_k = [seqlens_k[0]] * len(seqlens_k)
    torch.random.manual_seed(42)
    num_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    head_dim = 128
    batch_size = len(seqlens_q)
    # Size of the relative-position table used by the "multi_buffer" case.
    max_rel_pos = 512
    total_q = sum(seqlens_q)
    total_k = sum(seqlens_k)
    # cu_seqlens are always materialized: the eager factories index packed
    # aux buffers with them even when the kernel side runs non-varlen.
    cu_seqlens_q = torch.tensor(
        [0] + list(torch.tensor(seqlens_q).cumsum(0).tolist()),
        device="cuda",
        dtype=torch.int32,
    )
    cu_seqlens_k = torch.tensor(
        [0] + list(torch.tensor(seqlens_k).cumsum(0).tolist()),
        device="cuda",
        dtype=torch.int32,
    )
    if varlen_q:
        q = torch.randn(total_q, num_heads, head_dim, device="cuda", dtype=dtype)
    else:
        seqlen_q = seqlens_q[0]
        q = torch.randn(
            batch_size, seqlen_q, num_heads, head_dim, device="cuda", dtype=dtype
        )
    if varlen_k:
        k = torch.randn(total_k, num_heads, head_dim, device="cuda", dtype=dtype)
        v = torch.randn(total_k, num_heads, head_dim, device="cuda", dtype=dtype)
    else:
        seqlen_k = seqlens_k[0]
        k = torch.randn(
            batch_size, seqlen_k, num_heads, head_dim, device="cuda", dtype=dtype
        )
        v = torch.randn(
            batch_size, seqlen_k, num_heads, head_dim, device="cuda", dtype=dtype
        )
    # GQA: keep only the first num_kv_heads heads of K/V.
    if pack_gqa:
        if varlen_k:
            k = k[:, :num_kv_heads, :].clone()
            v = v[:, :num_kv_heads, :].clone()
        else:
            k = k[:, :, :num_kv_heads, :].clone()
            v = v[:, :, :num_kv_heads, :].clone()
    # Setup aux tensors based on indexing type
    if aux_type == "kv":
        bias = torch.randn(total_k, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias, cu_seqlens_k)
    elif aux_type == "q":
        bias = torch.randn(total_q, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias, cu_seqlens_q)
    elif aux_type == "q_and_kv":
        q_bias = torch.randn(total_q, device="cuda", dtype=dtype) * 0.1
        kv_bias = torch.randn(total_k, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [q_bias, kv_bias]
        eager_score_mod = eager_factory(q_bias, kv_bias, cu_seqlens_q, cu_seqlens_k)
    elif aux_type == "q_concat":
        bias = torch.randn(total_q, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias, cu_seqlens_q)
    elif aux_type == "kv_with_cu":
        kv_bias = torch.randn(total_k, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [kv_bias]
        eager_score_mod = eager_factory(kv_bias, cu_seqlens_q, cu_seqlens_k)
    elif aux_type == "multi_buffer":
        batch_bias = torch.randn(batch_size, device="cuda", dtype=dtype) * 0.1
        head_scale = torch.randn(num_heads, device="cuda", dtype=dtype) * 0.1 + 1.0
        q_pos_bias = torch.randn(total_q, device="cuda", dtype=dtype) * 0.1
        kv_pos_bias = torch.randn(total_k, device="cuda", dtype=dtype) * 0.1
        rel_pos_scale = (
            torch.randn(max_rel_pos * 2 + 1, device="cuda", dtype=dtype) * 0.1
        )
        aux_tensors = [batch_bias, head_scale, q_pos_bias, kv_pos_bias, rel_pos_scale]
        eager_score_mod = eager_factory(
            batch_bias,
            head_scale,
            q_pos_bias,
            kv_pos_bias,
            rel_pos_scale,
            cu_seqlens_q,
            cu_seqlens_k,
            max_rel_pos,
        )
    else:
        raise ValueError(f"Unknown aux_type: {aux_type}")
    # Prepare reference tensors for flex_attention
    q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k = prepare_ref_tensors(
        q, k, v, cu_seqlens_q, cu_seqlens_k, varlen_q, varlen_k, batch_size, seqlens_q
    )
    # FP32 run is the ground truth; the same-dtype run calibrates tolerance.
    out_ref_fp32 = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=torch.float32
    )
    out_pt = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=dtype
    )
    # The kernel only receives cu_seqlens for the sides that are varlen.
    kernel_cu_seqlens_q = cu_seqlens_q if varlen_q else None
    kernel_cu_seqlens_k = cu_seqlens_k if varlen_k else None
    out_cute = run_cute_flash(
        q,
        k,
        v,
        cute_score_mod,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
        cu_seqlens_q=kernel_cu_seqlens_q,
        cu_seqlens_k=kernel_cu_seqlens_k,
    )
    # References come back packed; reshape to batched when the kernel output
    # is batched (non-varlen Q).
    if varlen_q:
        out_ref_final = out_ref_fp32
        out_pt_final = out_pt
        out_cute_final = out_cute
    else:
        seqlen_q = seqlens_q[0]
        out_ref_final = out_ref_fp32.reshape(batch_size, seqlen_q, num_heads, head_dim)
        out_pt_final = out_pt.reshape(batch_size, seqlen_q, num_heads, head_dim)
        out_cute_final = out_cute
    assert out_cute_final.shape == out_ref_final.shape, (
        f"Shape mismatch: {out_cute_final.shape} vs {out_ref_final.shape}"
    )
    test_name = f"{cute_score_mod.__name__} (varlen_q={varlen_q}, varlen_k={varlen_k}, {aux_type})"
    check_results(
        out_cute_final,
        out_ref_final,
        out_pt_final,
        test_name,
        extra_atol=1e-3,
        seqlens_q=seqlens_q if varlen_q else None,
        cu_seqlens_q=cu_seqlens_q if varlen_q else None,
    )
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("page_size", [None, 128])
@pytest.mark.parametrize("varlen_q", [True, False])
@pytest.mark.parametrize("varlen_k", [True, False])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(4, 2)])
@pytest.mark.parametrize("seqlens_q,seqlens_k", SEQLEN_CONFIGS)
@pytest.mark.parametrize("score_mod_tuple", TEST_PAIRS_NO_GLOBAL)
def test_varlen_score_mod_kvcache(
    seqlens_q,
    seqlens_k,
    varlen_q,
    varlen_k,
    qhead_per_kvhead,
    num_kv_heads,
    page_size,
    dtype,
    score_mod_tuple,
):
    """Test varlen attention with score_mod and paged KV cache."""
    if IS_SM90 and page_size is not None:
        pytest.xfail("paged KV not supported on SM90")
    if not varlen_q and not varlen_k:
        pytest.skip(
            "At least one of varlen_q or varlen_k must be True for varlen tests"
        )
    if page_size is not None and varlen_k:
        pytest.skip("Paged KV requires batched (non-varlen) K")
    if not varlen_q:
        seqlens_q = [seqlens_q[0]] * len(seqlens_q)
    if not varlen_k:
        seqlens_k = [seqlens_k[0]] * len(seqlens_k)
    # Skip if page_size doesn't divide seqlens evenly (for simplicity)
    if page_size is not None and not varlen_k:
        if seqlens_k[0] % page_size != 0:
            pytest.skip("page_size must divide seqlen_k")
    torch.random.manual_seed(42)
    cute_score_mod, eager_factory, aux_type = score_mod_tuple
    num_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    head_dim = 128
    batch_size = len(seqlens_q)
    device = "cuda"
    # Setup tensors
    q, k, v, cu_seqlens_q, cu_seqlens_k = setup_tensors(
        seqlens_q, seqlens_k, varlen_q, varlen_k, num_heads, head_dim, dtype
    )
    # GQA: keep only the first num_kv_heads heads of K/V.
    if pack_gqa:
        if varlen_k:
            k = k[:, :num_kv_heads, :].clone()
            v = v[:, :num_kv_heads, :].clone()
        else:
            k = k[:, :, :num_kv_heads, :].clone()
            v = v[:, :, :num_kv_heads, :].clone()
    page_table = None
    k_cache_paged = None
    v_cache_paged = None
    k_cache = k
    v_cache = v
    if page_size is not None:
        # Replace K/V with a block (paged) cache: the contiguous view
        # (k_cache/v_cache) feeds the reference, the paged copy feeds CuTE.
        seqlen_k = seqlens_k[0]
        (
            k_cache_bhsd,
            v_cache_bhsd,
            page_table,
            k_cache_paged,
            v_cache_paged,
            num_blocks,
        ) = _generate_block_kvcache(
            seqlen_k, page_size, batch_size, num_kv_heads, head_dim, device, dtype
        )
        k_cache = k_cache_bhsd.transpose(1, 2)  # BHSD -> BSHD
        v_cache = v_cache_bhsd.transpose(1, 2)
        seqused_k = torch.tensor(seqlens_k, dtype=torch.int32, device=device)
    else:
        seqused_k = None
    # Setup aux tensors and eager score_mod
    aux_tensors = None
    if aux_type == "batch":
        # NOTE(review): zeros * 0.1 is always zero, so the batch bias is all
        # zeros here; torch.randn may have been intended. Confirm.
        bias = torch.zeros(batch_size, device=device, dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias)
    elif aux_type == "dual_buffer":
        seqlen_q = seqlens_q[0] if not varlen_q else max(seqlens_q)
        head_bias = torch.randn(num_heads, device=device, dtype=dtype) * 0.2
        pos_bias = torch.arange(seqlen_q, device=device, dtype=dtype) * 0.01
        aux_tensors = [head_bias, pos_bias]
        eager_score_mod = eager_factory(head_bias, pos_bias)
    else:
        eager_score_mod = eager_factory
    # Prepare reference tensors
    q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k = prepare_ref_tensors(
        q,
        k_cache,
        v_cache,
        cu_seqlens_q,
        cu_seqlens_k,
        varlen_q,
        varlen_k,
        batch_size,
        seqlens_q,
    )
    out_ref_fp32 = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=torch.float32
    )
    out_pt = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=dtype
    )
    # CuTE consumes the paged cache when paging is enabled.
    k_input = k_cache_paged if page_size is not None else k_cache
    v_input = v_cache_paged if page_size is not None else v_cache
    out_cute = run_cute_flash(
        q,
        k_input,
        v_input,
        cute_score_mod,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
        cu_seqlens_q=cu_seqlens_q if varlen_q else None,
        cu_seqlens_k=cu_seqlens_k if (varlen_k and page_size is None) else None,
        page_table=page_table if page_size is not None else None,
        seqused_k=seqused_k if page_size is not None else None,
    )
    # Mixed layout (batched Q, varlen K): references come back packed.
    if not varlen_q and varlen_k:
        seqlen_q = q.shape[1]
        out_ref_fp32 = out_ref_fp32.reshape(batch_size, seqlen_q, num_heads, head_dim)
        out_pt = out_pt.reshape(batch_size, seqlen_q, num_heads, head_dim)
    assert out_cute.shape == out_ref_fp32.shape, (
        f"Shape mismatch: {out_cute.shape} vs {out_ref_fp32.shape}"
    )
    test_name = f"{cute_score_mod.__name__} (varlen_q={varlen_q}, varlen_k={varlen_k}, paged={page_size is not None})"
    extra_atol = 2e-3
    check_results(
        out_cute,
        out_ref_fp32,
        out_pt,
        test_name,
        extra_atol=extra_atol,
        seqlens_q=seqlens_q if varlen_q else None,
        cu_seqlens_q=cu_seqlens_q if varlen_q else None,
    )
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("page_size", [None, 128])
@pytest.mark.parametrize("varlen_q", [True, False])
@pytest.mark.parametrize("varlen_k", [True, False])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 1), (4, 2)])
@pytest.mark.parametrize("seqlens_q,seqlens_k", SEQLEN_CONFIGS)
@pytest.mark.parametrize("score_mod_tuple", TEST_PAIRS_WITH_GLOBAL)
def test_varlen_score_mod_with_paged_kvcache_global(
    seqlens_q,
    seqlens_k,
    varlen_q,
    varlen_k,
    qhead_per_kvhead,
    num_kv_heads,
    page_size,
    dtype,
    score_mod_tuple,
):
    """Test varlen attention with global idx score_mod and paged KV cache."""
    if IS_SM90 and page_size is not None:
        pytest.xfail("paged KV not supported on SM90")
    if page_size is not None and varlen_k:
        pytest.skip("Paged KV cache requires batched (non-varlen) K")
    if not varlen_q and not varlen_k:
        pytest.skip(
            "At least one of varlen_q or varlen_k must be True for varlen tests"
        )
    if not varlen_q:
        seqlens_q = [seqlens_q[0]] * len(seqlens_q)
    if not varlen_k:
        seqlens_k = [seqlens_k[0]] * len(seqlens_k)
    if page_size is not None and not varlen_k:
        if seqlens_k[0] % page_size != 0:
            pytest.skip("page_size must divide seqlen_k")
    cute_score_mod, eager_factory, aux_type, requires_global = score_mod_tuple
    if requires_global == "q" and not varlen_q:
        pytest.skip(f"{cute_score_mod.__name__} requires varlen_q for q_idx_global")
    if requires_global == "kv" and not varlen_k:
        pytest.skip(f"{cute_score_mod.__name__} requires varlen_k for kv_idx_global")
    if requires_global == "both" and (not varlen_q or not varlen_k):
        pytest.skip(f"{cute_score_mod.__name__} requires both varlen_q and varlen_k")
    torch.random.manual_seed(42)
    num_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    head_dim = 128
    batch_size = len(seqlens_q)
    # Size of the relative-position table used by the "multi_buffer" case.
    max_rel_pos = 512
    device = "cuda"
    total_q = sum(seqlens_q)
    total_k = sum(seqlens_k)
    cu_seqlens_q = torch.tensor(
        [0] + list(torch.tensor(seqlens_q).cumsum(0).tolist()),
        device=device,
        dtype=torch.int32,
    )
    cu_seqlens_k = torch.tensor(
        [0] + list(torch.tensor(seqlens_k).cumsum(0).tolist()),
        device=device,
        dtype=torch.int32,
    )
    cu_seqlens_k_for_kernel = cu_seqlens_k if varlen_k else None
    # NOTE(review): Q is always created packed and cu_seqlens_q is always
    # passed to the kernel below, so the varlen_q=False parametrization runs
    # the same packed-Q path as varlen_q=True here -- confirm whether that
    # duplication is intended.
    q = torch.randn(total_q, num_heads, head_dim, device=device, dtype=dtype)
    if varlen_k:
        k = torch.randn(total_k, num_heads, head_dim, device=device, dtype=dtype)
        v = torch.randn(total_k, num_heads, head_dim, device=device, dtype=dtype)
    else:
        seqlen_k = seqlens_k[0]
        k = torch.randn(
            batch_size, seqlen_k, num_heads, head_dim, device=device, dtype=dtype
        )
        v = torch.randn(
            batch_size, seqlen_k, num_heads, head_dim, device=device, dtype=dtype
        )
    # GQA: keep only the first num_kv_heads heads of K/V.
    if pack_gqa:
        if varlen_k:
            k = k[:, :num_kv_heads, :].clone()
            v = v[:, :num_kv_heads, :].clone()
        else:
            k = k[:, :, :num_kv_heads, :].clone()
            v = v[:, :, :num_kv_heads, :].clone()
    page_table = None
    k_cache_paged = None
    v_cache_paged = None
    k_cache = k
    v_cache = v
    if page_size is not None:
        # Replace K/V with a block (paged) cache: the contiguous view feeds
        # the reference, the paged copy feeds CuTE.
        seqlen_k = seqlens_k[0]
        (
            k_cache_bhsd,
            v_cache_bhsd,
            page_table,
            k_cache_paged,
            v_cache_paged,
            num_blocks,
        ) = _generate_block_kvcache(
            seqlen_k, page_size, batch_size, num_kv_heads, head_dim, device, dtype
        )
        k_cache = k_cache_bhsd.transpose(1, 2)  # BHSD -> BSHD
        v_cache = v_cache_bhsd.transpose(1, 2)
        seqused_k = torch.tensor(seqlens_k, dtype=torch.int32, device=device)
    else:
        seqused_k = None
    # Aux buffers and the matching eager score_mod, per indexing type.
    if aux_type == "kv":
        bias = torch.randn(total_k, device=device, dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias, cu_seqlens_k)
    elif aux_type == "q":
        bias = torch.randn(total_q, device=device, dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias, cu_seqlens_q)
    elif aux_type == "q_and_kv":
        q_bias = torch.randn(total_q, device=device, dtype=dtype) * 0.1
        kv_bias = torch.randn(total_k, device=device, dtype=dtype) * 0.1
        aux_tensors = [q_bias, kv_bias]
        eager_score_mod = eager_factory(q_bias, kv_bias, cu_seqlens_q, cu_seqlens_k)
    elif aux_type == "q_concat":
        bias = torch.randn(total_q, device=device, dtype=dtype) * 0.1
        aux_tensors = [bias]
        eager_score_mod = eager_factory(bias, cu_seqlens_q)
    elif aux_type == "kv_with_cu":
        kv_bias = torch.randn(total_k, device=device, dtype=dtype) * 0.1
        aux_tensors = [kv_bias]
        eager_score_mod = eager_factory(kv_bias, cu_seqlens_q, cu_seqlens_k)
    elif aux_type == "multi_buffer":
        batch_bias = torch.randn(batch_size, device=device, dtype=dtype) * 0.1
        head_scale = torch.randn(num_heads, device=device, dtype=dtype) * 0.1 + 1.0
        q_pos_bias = torch.randn(total_q, device=device, dtype=dtype) * 0.1
        kv_pos_bias = torch.randn(total_k, device=device, dtype=dtype) * 0.1
        rel_pos_scale = (
            torch.randn(max_rel_pos * 2 + 1, device=device, dtype=dtype) * 0.1
        )
        aux_tensors = [batch_bias, head_scale, q_pos_bias, kv_pos_bias, rel_pos_scale]
        eager_score_mod = eager_factory(
            batch_bias,
            head_scale,
            q_pos_bias,
            kv_pos_bias,
            rel_pos_scale,
            cu_seqlens_q,
            cu_seqlens_k,
            max_rel_pos,
        )
    else:
        raise ValueError(f"Unknown aux_type: {aux_type}")
    # varlen_q is forced True here because Q is always packed (see note above).
    q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k = prepare_ref_tensors(
        q,
        k_cache,
        v_cache,
        cu_seqlens_q,
        cu_seqlens_k,
        True,
        varlen_k,
        batch_size,
        seqlens_q,
    )
    out_ref_fp32 = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=torch.float32
    )
    out_pt = run_flex_varlen_ref(
        q_ref, k_ref, v_ref, ref_cu_q, ref_cu_k, eager_score_mod, dtype=dtype
    )
    # Run CuTE
    k_input = k_cache_paged if page_size is not None else k_cache
    v_input = v_cache_paged if page_size is not None else v_cache
    out_cute = torch.empty_like(q)
    _flash_attn_fwd(
        q,
        k_input,
        v_input,
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k_for_kernel if page_size is None else None,
        seqused_k=seqused_k if page_size is not None else None,
        page_table=page_table,
        return_lse=True,
        score_mod=cute_score_mod,
        out=out_cute,
        lse=None,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
    )
    assert out_cute.shape == out_ref_fp32.shape, (
        f"Shape mismatch: {out_cute.shape} vs {out_ref_fp32.shape}"
    )
    test_name = f"{cute_score_mod.__name__} (paged={page_size is not None}, {aux_type})"
    check_results(
        out_cute,
        out_ref_fp32,
        out_pt,
        test_name,
        extra_atol=1e-3,
        seqlens_q=seqlens_q,
        cu_seqlens_q=cu_seqlens_q,
    )
# Allow running this file directly: forwards to pytest in verbose mode.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_score_mod_varlen.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1033,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:tests/cute/test_utils.py | """Unit tests for flash_attn.cute.utils module."""
import functools
from flash_attn.cute import utils as cute_utils
from flash_attn.cute.utils import hash_callable
class TestHashCallable:
"""Tests for hash_callable function."""
def test_returns_cute_hash_when_set_on_function(self):
"""hash_callable should return __cute_hash__ immediately when set on function."""
def my_func():
pass
my_func.__cute_hash__ = "precomputed-hash-123"
result = hash_callable(my_func)
assert result == "precomputed-hash-123"
def test_returns_cute_hash_from_wrapped_function(self):
"""hash_callable should check __wrapped__ for __cute_hash__."""
def inner_func():
pass
inner_func.__cute_hash__ = "inner-hash-456"
# Simulate a decorator that sets __wrapped__
@functools.wraps(inner_func)
def wrapper_func():
return inner_func()
result = hash_callable(wrapper_func)
assert result == "inner-hash-456"
def test_prefers_wrapper_cute_hash_over_wrapped(self):
"""When both wrapper and wrapped have __cute_hash__, prefer wrapper."""
def inner_func():
pass
inner_func.__cute_hash__ = "inner-hash"
@functools.wraps(inner_func)
def wrapper_func():
return inner_func()
wrapper_func.__cute_hash__ = "wrapper-hash"
result = hash_callable(wrapper_func)
assert result == "wrapper-hash"
def test_fallback_to_source_hashing(self):
"""hash_callable should fall back to source hashing when no __cute_hash__."""
def my_func():
return 42
result = hash_callable(my_func)
# Should return a hex string (SHA256 hash)
assert isinstance(result, str)
assert len(result) == 64 # SHA256 produces 64 hex chars
def test_same_function_produces_same_hash(self):
"""Same function should produce consistent hash."""
def my_func():
return 42
hash1 = hash_callable(my_func)
hash2 = hash_callable(my_func)
assert hash1 == hash2
def test_different_functions_produce_different_hashes(self):
"""Different functions should produce different hashes."""
def func_a():
return 1
def func_b():
return 2
hash_a = hash_callable(func_a)
hash_b = hash_callable(func_b)
assert hash_a != hash_b
def test_fast_path_skips_expensive_hashing(self):
"""When __cute_hash__ is set, expensive operations should be skipped."""
def my_func():
pass
my_func.__cute_hash__ = "fast-hash"
# Mock at module level since we loaded it directly
original_getsource = cute_utils.inspect.getsource
call_tracker = {"getsource": 0, "sha256": 0}
def tracking_getsource(*args, **kwargs):
call_tracker["getsource"] += 1
return original_getsource(*args, **kwargs)
original_sha256 = cute_utils.hashlib.sha256
def tracking_sha256(*args, **kwargs):
call_tracker["sha256"] += 1
return original_sha256(*args, **kwargs)
cute_utils.inspect.getsource = tracking_getsource
cute_utils.hashlib.sha256 = tracking_sha256
try:
result = hash_callable(my_func)
finally:
cute_utils.inspect.getsource = original_getsource
cute_utils.hashlib.sha256 = original_sha256
# Neither inspect.getsource nor hashlib.sha256 should be called
assert call_tracker["getsource"] == 0, "getsource should not be called"
assert call_tracker["sha256"] == 0, "sha256 should not be called"
assert result == "fast-hash"
def test_fast_path_on_wrapped_skips_expensive_hashing(self):
"""When __cute_hash__ is on __wrapped__, expensive operations should be skipped."""
def inner_func():
pass
inner_func.__cute_hash__ = "wrapped-fast-hash"
@functools.wraps(inner_func)
def wrapper_func():
return inner_func()
# Mock at module level
original_getsource = cute_utils.inspect.getsource
call_tracker = {"getsource": 0, "sha256": 0}
def tracking_getsource(*args, **kwargs):
call_tracker["getsource"] += 1
return original_getsource(*args, **kwargs)
original_sha256 = cute_utils.hashlib.sha256
def tracking_sha256(*args, **kwargs):
call_tracker["sha256"] += 1
return original_sha256(*args, **kwargs)
cute_utils.inspect.getsource = tracking_getsource
cute_utils.hashlib.sha256 = tracking_sha256
try:
result = hash_callable(wrapper_func)
finally:
cute_utils.inspect.getsource = original_getsource
cute_utils.hashlib.sha256 = original_sha256
assert call_tracker["getsource"] == 0, "getsource should not be called"
assert call_tracker["sha256"] == 0, "sha256 should not be called"
assert result == "wrapped-fast-hash"
def test_closure_values_affect_hash(self):
"""Functions with different closure values should have different hashes."""
value1 = 10
value2 = 20
def make_func(val):
def inner():
return val
return inner
func1 = make_func(value1)
func2 = make_func(value2)
hash1 = hash_callable(func1)
hash2 = hash_callable(func2)
assert hash1 != hash2
class TestHashCallableIntegration:
"""Integration tests for hash_callable with flash attention."""
def test_repeated_calls_use_cached_hash(self):
"""Repeated calls with same score_mod should use cached/fast hash path."""
def score_mod(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, aux_tensors):
return tSrS_ssa
# Set __cute_hash__ to simulate Inductor-generated code
score_mod.__cute_hash__ = "inductor-generated-hash"
original_getsource = cute_utils.inspect.getsource
call_count = [0] # Use list for mutable counter in nested function
def counting_getsource(*args, **kwargs):
call_count[0] += 1
return original_getsource(*args, **kwargs)
cute_utils.inspect.getsource = counting_getsource
try:
# Call hash_callable multiple times
hash1 = hash_callable(score_mod)
hash2 = hash_callable(score_mod)
hash3 = hash_callable(score_mod)
finally:
cute_utils.inspect.getsource = original_getsource
# getsource should never be called because __cute_hash__ is set
assert call_count[0] == 0, f"getsource was called {call_count[0]} times"
assert hash1 == hash2 == hash3 == "inductor-generated-hash"
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:hopper/test_torch_compile_and_export.py | import torch
from flash_attn_interface import flash_attn_func
from torch import nn
class EfficienctMultiHeadAttention(nn.Module):
def __init__(self, embed_size, num_heads, dropout=0.0, use_flash_attn=True):
super().__init__()
assert embed_size % num_heads == 0, f"{embed_size=} {num_heads=}"
self.embed_size = embed_size
self.num_heads = num_heads
self.head_dim = embed_size // num_heads
self.use_flash_attn = use_flash_attn and (flash_attn_func is not None)
self.qkv_proj = nn.Linear(embed_size, 3 * embed_size)
self.out_proj = nn.Linear(embed_size, embed_size)
self.dropout = dropout
def forward(self, x, attention_mask=None):
N, seq_length, _ = x.shape
qkv = self.qkv_proj(x)
q, k, v = qkv.chunk(3, dim=-1)
q = q.view(N, seq_length, self.num_heads, self.head_dim)
k = k.view(N, seq_length, self.num_heads, self.head_dim)
v = v.view(N, seq_length, self.num_heads, self.head_dim)
if self.use_flash_attn and attention_mask is None:
out = flash_attn_func(
q, k, v
)
out = out.reshape(N, seq_length, self.embed_size)
out = self.out_proj(out)
return out
def create_model(batch_size=16, sequence_length=256, embedding_dim=2048, num_heads=16):
model = EfficienctMultiHeadAttention(embedding_dim, num_heads).cuda().bfloat16()
input_tensor = torch.randn(batch_size, sequence_length, embedding_dim).cuda().bfloat16()
return model, input_tensor
def test_export_model():
model, input_tensor = create_model()
expected = torch.compile(model, backend="aot_eager")(input_tensor)
loss = expected.sum()
loss.backward()
ep = torch.export.export(model, (input_tensor,))
got = ep.module()(input_tensor,)
assert torch.equal(expected, got)
loss_2 = got.sum()
loss_2.backward()
assert torch.equal(loss, loss_2)
def test_compile_and_package_model():
model, input_tensor = create_model()
expected = torch.compile(model, backend="aot_eager")(input_tensor)
exported = torch.export.export(model, (input_tensor,))
torch._inductor.aoti_compile_and_package(
exported,
package_path="model.pt2",
)
compiled_model = torch._inductor.package.load_package("model.pt2")
out = compiled_model(input_tensor,)
assert torch.equal(expected, out)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "hopper/test_torch_compile_and_export.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:tests/cute/test_flash_attn_race_condition.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
import math
import itertools
import os
import pytest
import torch
from einops import rearrange, repeat
try:
from flash_attn.layers.rotary import apply_rotary_emb
except ImportError:
apply_rotary_emb = None
from flash_attn.cute.testing import (
attention_ref,
generate_qkv,
generate_random_padding_mask,
pad_input,
unpad_input,
)
from flash_attn.cute.interface import (
flash_attn_func,
flash_attn_varlen_func,
flash_attn_combine,
_flash_attn_bwd,
)
DISABLE_SPLIT = os.getenv("FLASH_ATTENTION_DISABLE_SPLIT", "FALSE") == "TRUE"
IS_SM90 = torch.cuda.get_device_capability()[0] == 9
INCREASED_TRIALS = False
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["gqa"])
# @pytest.mark.parametrize("has_learnable_sink", [False, True])
@pytest.mark.parametrize("has_learnable_sink", [False])
# @pytest.mark.parametrize("has_qv", [False, True])
@pytest.mark.parametrize("has_qv", [False])
# @pytest.mark.parametrize("deterministic", [False, True])
@pytest.mark.parametrize("deterministic", [True])
# @pytest.mark.parametrize("softcap", [0.0, 15.0])
@pytest.mark.parametrize("softcap", [0.0])
# @pytest.mark.parametrize("local_enum", [0, 1, 2, 3])
@pytest.mark.parametrize("local_enum", [0, 1])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
# @pytest.mark.parametrize("d", [64, 128])
# @pytest.mark.parametrize("d", [128, 192])
@pytest.mark.parametrize("d", [64, 128, 192])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(4224, 4224),
(2000, 4000),
],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(128, 128)])
def test_flash_attn_output(
seqlen_q,
seqlen_k,
d,
causal,
local_enum,
softcap,
deterministic,
has_qv,
has_learnable_sink,
mha_type,
dtype,
):
local = local_enum > 0
if local and causal:
pytest.skip()
device = "cuda"
# set seed
torch.random.manual_seed(0)
torch.cuda.empty_cache()
torch.cuda.synchronize()
batch_size = 9 if seqlen_k <= 2048 else 2
# batch_size = 1
nheads = 6
# nheads = 1
nheads_kv = nheads if mha_type == "mha" else (3 if mha_type == "gqa" else 1)
dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
# dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
dv_vals = [128] if d == 192 else [d]
if dtype == torch.float8_e4m3fn:
dv_vals = [d]
# attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0]
attention_chunk_vals = [0]
for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
q_ref = torch.randn(
batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref
)
if softcap > 0.0:
# Ensure the values of qk are at least within softcap range.
q_ref = q_ref * softcap / 4
q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
k_ref = (
torch.randn(
batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
.requires_grad_()
)
v_ref = (
torch.randn(
batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
.requires_grad_()
)
if has_qv:
qv_ref = (
torch.randn(
batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
)
else:
qv_ref = None
# Put window_size after QKV randn so that window_size changes from test to test
window_size = (
(None, None) if not local else torch.randint(0, seqlen_k, (2,)).tolist()
)
if local_enum == 2:
window_size = (None, -window_size[1])
elif local_enum == 3:
window_size = (-window_size[0], None)
if local:
print("window size = ", window_size)
# window_size = (-1, -1) if not local else (16, 0)
if has_learnable_sink:
learnable_sink = torch.randn(nheads, dtype=torch.bfloat16, device=device)
else:
learnable_sink = None
if dtype == torch.float8_e4m3fn:
q_descale, k_descale, v_descale = [
torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32)
* 2
for _ in range(3)
]
else:
q_descale, k_descale, v_descale = None, None, None
q, k, v = [x.detach().to(dtype).requires_grad_() for x in (q_ref, k_ref, v_ref)]
qv = qv_ref.detach().to(dtype).requires_grad_() if has_qv else None
out_ref, attn_ref = attention_ref(
q_ref,
k_ref,
v_ref,
None,
None,
causal=causal,
qv=qv_ref,
q_descale=q_descale,
k_descale=k_descale,
v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
learnable_sink=learnable_sink,
softcap=softcap,
)
out_pt, attn_pt = attention_ref(
q_ref,
k_ref,
v_ref,
None,
None,
causal=causal,
qv=qv_ref,
q_descale=q_descale,
k_descale=k_descale,
v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
learnable_sink=learnable_sink,
softcap=softcap,
upcast=False,
reorder_ops=True,
intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
)
# k_extended = repeat(k_ref, "b s h d -> b s (h k) d", k=nheads // nheads_kv)
# qk = torch.einsum('bshd,bthd->bhst', q_ref, k_extended).float()
# # if qv is not None:
# # qk += torch.einsum('bshd,bthd->bhst', qv_ref, v_ref).float()
# m = qk.amax(-1, keepdim=True)
# s_tmp = torch.exp((qk - m) / math.sqrt(d))
# exp_sum = s_tmp.sum(-1)
# # qk = torch.einsum('bthd,bshd->bhts', q_ref.float() / math.sqrt(d), k_ref.float())
# # lse_ref = torch.logsumexp(qk, dim=-1)
# Numerical error if we just do any arithmetic on out_ref
fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
rtol = 2 if softcap == 0.0 else 3
print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
# num_splits_vals = [1, 3]
# pack_gqa_vals = [False, True, None]
# SplitKV is not supported for hdim >= 192
pack_gqa_vals = [False]
# num_splits_vals = [1, 3] if d < 192 and not DISABLE_SPLIT else [1]
num_splits_vals = [1]
for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
out, lse = flash_attn_func(
q,
k,
v,
causal=causal,
# qv=qv,
# q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
window_size=window_size,
# attention_chunk=attention_chunk,
softcap=softcap,
learnable_sink=learnable_sink,
pack_gqa=pack_gqa,
num_splits=num_splits,
deterministic=deterministic,
)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
# if not causal:
# print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
# breakpoint()
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
assert (out - out_ref).abs().max().item() <= rtol * (
out_pt - out_ref
).abs().max().item() + fwd_atol
if (
dtype != torch.float8_e4m3fn
and not has_qv
and not dv > 256
and not attention_chunk != 0
and softcap == 0.0
and ((dv == d and d <= 128) or (d == 192 and dv == 128))
and learnable_sink is None
# and False
):
if IS_SM90 and mha_type != "mha":
pytest.xfail("SM90 backward: GQA/MQA has tensor layout issue (qhead_per_kvhead > 1)")
if IS_SM90 and local:
pytest.xfail("SM90 backward: local attention not supported yet")
if d == 192 and local:
pytest.xfail("hdim 192 backward: local attention not supported yet")
g = torch.randn_like(out)
# do_o = ((g.float() * out.float()).sum(-1)).transpose(1, 2)
dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
# print(f"dO_O max diff: {(softmax_d - do_o).abs().max().item()}")
# assert (softmax_d - do_o).abs().max().item() <= 1e-5
# assert dq_accum.abs().max().item() == 0.0
# dS = torch.einsum('bthd,bshd->bhts', g.float(), v.float())
# P = torch.softmax(qk, -1)
# dP = P * (dS - do_o.transpose(1, 2).unsqueeze(1))
# dQ = torch.einsum('bhts,bshd->bthd', dP, k.float())
# dV = torch.einsum('bhts,bthd->bshd', P, g.float())
# dK = torch.einsum('bhts,bthd->bshd', dP, q.float())
# breakpoint()
# dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
dq_ref, dk_ref, dv_ref = torch.autograd.grad(
out_ref, (q_ref, k_ref, v_ref), g
)
dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
# breakpoint()
dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (
0 if softcap == 0 else 3e-4
)
assert (dq - dq_ref).abs().max().item() <= rtol * (
dq_pt - dq_ref
).abs().max().item() + dq_atol
dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (
0 if softcap == 0 else 3e-4
)
assert (dk - dk_ref).abs().max().item() <= rtol * (
dk_pt - dk_ref
).abs().max().item() + dk_atol
dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (
0 if softcap == 0 else 3e-4
)
assert (dv - dv_ref).abs().max().item() <= rtol * (
dv_pt - dv_ref
).abs().max().item() + dv_atol
num_iters = 10_000 if INCREASED_TRIALS else 1000
for i in range(num_iters):
dq2, dk2, dv2, = _flash_attn_bwd(
q, k, v, out, g, lse,
causal=causal,
window_size_left=window_size[0],
window_size_right=window_size[1],
deterministic=True,
)
diff_dq = (dq - dq2).abs()
max_idx = diff_dq.argmax()
print(f"dQ max diff: {diff_dq.max().item()}")
print(f" at index {max_idx.item()}: dQ={dq.flatten()[max_idx].item()}, dQ2={dq2.flatten()[max_idx].item()}")
diff_dk = (dk - dk2).abs()
max_idx = diff_dk.argmax()
print(f"dK max diff: {diff_dk.max().item()}")
print(f" at index {max_idx.item()}: dK={dk.flatten()[max_idx].item()}, dK2={dk2.flatten()[max_idx].item()}")
diff_dv = (dv - dv2).abs()
max_idx = diff_dv.argmax()
print(f"dV max diff: {diff_dv.max().item()}")
print(f" at index {max_idx.item()}: dV={dv.flatten()[max_idx].item()}, dV2={dv2.flatten()[max_idx].item()}")
# print(f"dQ max diff with myself: {(dq - dq2).abs().max().item()}")
# print(f"dK max diff with myself: {(dk - dk2).abs().max().item()}")
# print(f"dV max diff with myself: {(dv - dv2).abs().max().item()}")
# print(f"dQ mean diff with myself: {(dq - dq2).abs().mean().item()}")
# print(f"dK mean diff with myself: {(dk - dk2).abs().mean().item()}")
# print(f"dV mean diff with myself: {(dv - dv2).abs().mean().item()}")
assert torch.equal(dq, dq2)
assert torch.equal(dk, dk2)
assert torch.equal(dv, dv2)
print(f"✅ Iteration {i} passed!")
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["gqa"])
# @pytest.mark.parametrize("has_learnable_sink", [False, True])
@pytest.mark.parametrize("has_learnable_sink", [False])
# @pytest.mark.parametrize("has_qv", [False, True])
@pytest.mark.parametrize("has_qv", [False])
# @pytest.mark.parametrize("deterministic", [False, True])
@pytest.mark.parametrize("deterministic", [True])
# @pytest.mark.parametrize("softcap", [0.0, 15.0])
@pytest.mark.parametrize("softcap", [0.0])
# @pytest.mark.parametrize("local_enum", [0, 1, 2, 3])
@pytest.mark.parametrize("local_enum", [0, 1])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
# @pytest.mark.parametrize("add_unused_qkv", [False, True])
@pytest.mark.parametrize("add_unused_qkv", [False])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [64, 96, 128])
@pytest.mark.parametrize("d", [64, 128, 192])
# @pytest.mark.parametrize("d", [192])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(1024, 1024),
(2048, 2048),
],
)
@pytest.mark.parametrize("varlen_mode", ["random", "third", "full"])
# @pytest.mark.parametrize("varlen_mode", ["random"])
@pytest.mark.parametrize(
"zero_lengths_q, zero_lengths_k",
[
(False, False),
(True, False),
(False, True),
(True, True),
],
)
def test_flash_attn_varlen_output(
seqlen_q,
seqlen_k,
d,
add_unused_qkv,
causal,
local_enum,
softcap,
deterministic,
has_qv,
has_learnable_sink,
mha_type,
dtype,
varlen_mode,
zero_lengths_q,
zero_lengths_k,
):
local = local_enum > 0
if local and causal:
pytest.skip()
is_sm90 = torch.cuda.get_device_capability()[0] == 9
if is_sm90 and local:
pytest.xfail("bwd local attention not supported on sm90")
if is_sm90 and deterministic:
pytest.xfail("bwd deterministic not supported on sm90")
if (
causal or local
): # Right now reference only supports causal attention with seqlen_k == seqlen_q
seqlen_k = seqlen_q
device = "cuda"
# set seed
torch.random.manual_seed(seqlen_q + seqlen_k + d + int(causal) * 2 + int(local))
batch_size = 49 if seqlen_q <= 1024 else 7
nheads = 6
# nheads = 1
nheads_kv = nheads if mha_type == "mha" else (3 if mha_type == "gqa" else 1)
dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
# dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
# dv_vals = [128] if d == 192 else ([d] if d != 128 else [64, d])
dv_vals = [128] if d == 192 else [d]
# attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if seqlen_q <= seqlen_k else [0]
attention_chunk_vals = [0]
for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
q_ref = torch.randn(
batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref
)
if softcap > 0.0:
# Ensure the values of qk are at least within softcap range.
q_ref = (q_ref * softcap / 4).detach().requires_grad_()
q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
k_ref = (
torch.randn(
batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
.requires_grad_()
)
v_ref = (
torch.randn(
batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
.requires_grad_()
)
if has_qv:
qv_ref = (
torch.randn(
batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
)
else:
qv_ref = None
# Put window_size after QKV randn so that window_size changes from test to test
window_size = (
(None, None) if not local else torch.randint(0, seqlen_k, (2,)).tolist()
)
if local_enum == 2:
window_size = (None, window_size[1])
elif local_enum == 3:
window_size = (window_size[0], None)
if local:
print("window size = ", window_size)
if has_learnable_sink:
learnable_sink = torch.randn(nheads, dtype=torch.bfloat16, device=device)
else:
learnable_sink = None
if dtype == torch.float8_e4m3fn:
q_descale, k_descale, v_descale = [
torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32)
* 2
for _ in range(3)
]
else:
q_descale, k_descale, v_descale = None, None, None
q, k, v = [x.detach().requires_grad_() for x in (q_ref, k_ref, v_ref)]
qv = qv_ref.detach() if has_qv else None
query_padding_mask = generate_random_padding_mask(
seqlen_q,
batch_size,
device,
mode=varlen_mode,
zero_lengths=zero_lengths_q,
)
key_padding_mask = generate_random_padding_mask(
seqlen_k,
batch_size,
device,
mode=varlen_mode,
zero_lengths=zero_lengths_k,
)
def _gen_unused_masks(padding_mask, add_unused, max_seq_len, bs, device):
if add_unused:
another_mask = generate_random_padding_mask(max_seq_len, bs, device)
attn_mask = torch.logical_and(padding_mask, another_mask)
unused_mask = torch.logical_xor(
torch.logical_or(padding_mask, another_mask), attn_mask
)
else:
attn_mask = padding_mask
unused_mask = None
return attn_mask, unused_mask
query_padding_mask, query_unused_mask = _gen_unused_masks(
query_padding_mask, add_unused_qkv, seqlen_q, batch_size, q.device
)
# query_padding_mask[:] = True
# query_unused_mask = None
key_padding_mask, key_unused_mask = _gen_unused_masks(
key_padding_mask, add_unused_qkv, seqlen_k, batch_size, k.device
)
if causal or local:
key_padding_mask = query_padding_mask
(
q_unpad,
k_unpad,
v_unpad,
qv_unpad,
cu_seqlens_q,
cu_seqlens_k,
seqused_q,
seqused_k,
max_seqlen_q,
max_seqlen_k,
q,
k,
v,
qv,
output_pad_fn,
dq_pad_fn,
dk_pad_fn,
) = generate_qkv(
q,
k,
v,
query_padding_mask,
key_padding_mask,
qv=qv,
kvpacked=False,
query_unused_mask=query_unused_mask,
key_unused_mask=key_unused_mask,
)
print("cu_seqlens_q = ", cu_seqlens_q)
print("cu_seqlens_k = ", cu_seqlens_k)
q_unpad, k_unpad, v_unpad = [
x.detach().to(dtype).requires_grad_() for x in (q_unpad, k_unpad, v_unpad)
]
out_ref, attn_ref = attention_ref(
q_ref,
k_ref,
v_ref,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv_ref,
q_descale=q_descale,
k_descale=k_descale,
v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
learnable_sink=learnable_sink,
softcap=softcap,
)
out_pt, attn_pt = attention_ref(
q_ref,
k_ref,
v_ref,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv_ref,
q_descale=q_descale,
k_descale=k_descale,
v_descale=v_descale,
window_size=window_size,
attention_chunk=attention_chunk,
learnable_sink=learnable_sink,
softcap=softcap,
upcast=False,
reorder_ops=True,
intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
)
print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
if query_unused_mask is not None:
q_zero_masking = rearrange(query_unused_mask, "b s -> b s 1 1")
# Numerical error if we just do any arithmetic on out_ref
fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
rtol = 2 if softcap == 0.0 else 3
out_unpad, lse = flash_attn_varlen_func(
q_unpad,
k_unpad,
v_unpad,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
# max_seqlen_k,
# seqused_q=seqused_q,
# seqused_k=seqused_k,
max_seqlen_q=seqlen_q,
max_seqlen_k=seqlen_k,
causal=causal,
# qv=qv_unpad,
# q_descale=q_descale,
# k_descale=k_descale, v_descale=v_descale,
window_size=window_size,
# attention_chunk=attention_chunk,
learnable_sink=learnable_sink,
softcap=softcap,
num_splits=1,
pack_gqa=False,
deterministic=deterministic,
)
out = output_pad_fn(out_unpad)
if query_unused_mask is not None:
out.masked_fill_(q_zero_masking, 0.0)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
# if not causal:
# print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
# breakpoint()
# Check that FlashAttention's numerical error is at most 3x the numerical error
# of a Pytorch implementation.
assert (out - out_ref).abs().max().item() <= rtol * (
out_pt - out_ref
).abs().max().item() + fwd_atol
if (
dtype != torch.float8_e4m3fn
and not has_qv
and not dv > 256
and not attention_chunk != 0
and ((dv == d and d <= 128) or (d == 192 and dv == 128))
and not has_learnable_sink
and not is_sm90
# and False
):
if d == 192 and local:
pytest.xfail("hdim 192 backward: local attention not supported yet")
g_unpad = torch.randn_like(out_unpad)
# do_o = ((g_unpad.float() * out_unpad.float()).sum(-1)).transpose(-1, -2)
# import flash_attn_3_cuda
# dq_unpad, dk_unpad, dv_unpad, softmax_d, dq_accum, lse_log2 = flash_attn_3_cuda.bwd_varlen(
# g_unpad,
# q_unpad,
# k_unpad,
# v_unpad,
# out_unpad,
# lse,
# None,
# None,
# None,
# cu_seqlens_q,
# cu_seqlens_k,
# None, None,
# max_seqlen_q,
# max_seqlen_k,
# d ** (-0.5),
# causal,
# window_size[0], window_size[1],
# softcap,
# deterministic,
# 0, # sm_margin
# )
dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(
out_unpad, (q_unpad, k_unpad, v_unpad), g_unpad
)
dq = dq_pad_fn(dq_unpad)
dk = dk_pad_fn(dk_unpad)
dv = dk_pad_fn(dv_unpad)
if key_unused_mask is not None:
k_zero_masking = rearrange(key_unused_mask, "b s -> b s 1 1")
dk.masked_fill_(k_zero_masking, 0.0)
dv.masked_fill_(k_zero_masking, 0.0)
if query_unused_mask is not None:
dq.masked_fill_(q_zero_masking, 0.0)
# print(f"dO_O max diff: {(softmax_d - do_o).abs().max().item()}")
# assert (softmax_d - do_o).abs().max().item() <= 1e-5
# assert dq_accum.abs().max().item() == 0.0
g = output_pad_fn(g_unpad)
# qk = torch.einsum('bthd,bshd->bhts', q / (d ** 0.5), k).float()
# qk = torch.masked_fill(qk, rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
# dS = torch.einsum('bthd,bshd->bhts', g.float(), v.float())
# P = torch.softmax(qk, -1)
# dP = P * (dS - (g.float() * out.float()).sum(-1).transpose(1, 2).unsqueeze(-1))
# dQ = torch.einsum('bhts,bshd->bthd', dP, k.float())
# dV = torch.einsum('bhts,bthd->bshd', P, g.float())
# dK = torch.einsum('bhts,bthd->bshd', dP, q.float())
# dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
dq_ref, dk_ref, dv_ref = torch.autograd.grad(
out_ref, (q_ref, k_ref, v_ref), g
)
dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
# breakpoint()
dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (
0 if softcap == 0 else 3e-4
)
assert (dq - dq_ref).abs().max().item() <= rtol * (
dq_pt - dq_ref
).abs().max().item() + dq_atol
dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (
0 if softcap == 0 else 3e-4
)
assert (dk - dk_ref).abs().max().item() <= rtol * (
dk_pt - dk_ref
).abs().max().item() + dk_atol
dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (
0 if softcap == 0 else 3e-4
)
assert (dv - dv_ref).abs().max().item() <= rtol * (
dv_pt - dv_ref
).abs().max().item() + dv_atol
num_iters = 10_000 if INCREASED_TRIALS else 1000
for i in range(num_iters):
dq_unpad2, dk_unpad2, dv_unpad2 = _flash_attn_bwd(
q_unpad, k_unpad, v_unpad, out_unpad, g_unpad, lse,
causal=causal,
window_size_left=window_size[0],
window_size_right=window_size[1],
deterministic=True,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=seqlen_q,
max_seqlen_k=seqlen_k,
)
diff_dq = (dq_unpad - dq_unpad2).abs()
max_idx = diff_dq.argmax()
if i % 100 == 0:
print(f"dQ max diff: {diff_dq.max().item()}")
print(f" at index {max_idx.item()}: dQ={dq_unpad.flatten()[max_idx].item()}, dQ2={dq_unpad2.flatten()[max_idx].item()}")
diff_dk = (dk_unpad - dk_unpad2).abs()
max_idx = diff_dk.argmax()
if i % 100 == 0:
print(f"dK max diff: {diff_dk.max().item()}")
print(f" at index {max_idx.item()}: dK={dk_unpad.flatten()[max_idx].item()}, dK2={dk_unpad2.flatten()[max_idx].item()}")
diff_dv = (dv_unpad - dv_unpad2).abs()
max_idx = diff_dv.argmax()
if i % 100 == 0:
print(f"dV max diff: {diff_dv.max().item()}")
print(f" at index {max_idx.item()}: dV={dv_unpad.flatten()[max_idx].item()}, dV2={dv_unpad2.flatten()[max_idx].item()}")
assert torch.equal(dq_unpad, dq_unpad2)
assert torch.equal(dk_unpad, dk_unpad2)
assert torch.equal(dv_unpad, dv_unpad2)
if i % 100 == 0:
print(f"✅ Iteration {i} passed!")
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_flash_attn_race_condition.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 744,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:flash_attn/cute/paged_kv.py | from typing import Type
from dataclasses import dataclass
import cutlass
import cutlass.cute as cute
from cutlass.cute.nvgpu import cpasync
from cutlass import Int32, const_expr
from flash_attn.cute import utils
from quack.cute_dsl_utils import ParamsBase
from cutlass.cute import FastDivmodDivisor
import math
@dataclass
class PagedKVManager(ParamsBase):
    """Gathers K/V tiles for one (batch, head) from a paged KV cache into smem.

    Per KV block: each thread caches its page-table entries in registers
    (`load_page_table`), converts them to raw gmem addresses (`compute_X_ptr`),
    and issues predicated cp.async copies into shared memory (`load_KV`).
    """

    # Page table slice for the current batch element (narrowed in create()).
    mPageTable: cute.Tensor
    # Paged K / V storage, already narrowed to the current head in create().
    mK_paged: cute.Tensor
    mV_paged: cute.Tensor
    thread_idx: Int32
    # Fast divmod by the page size: logical row -> (page index, in-page offset).
    page_size_divmod: FastDivmodDivisor
    seqlen_k: Int32
    # Number of padding tokens on the left of the KV sequence.
    leftpad_k: Int32
    n_block_size: Int32
    num_threads: cutlass.Constexpr[Int32]
    head_dim_padded: cutlass.Constexpr[Int32]
    head_dim_v_padded: cutlass.Constexpr[Int32]
    # Threads cooperating on one row of the gmem->smem copy.
    gmem_threads_per_row: cutlass.Constexpr[Int32]
    # Page-table entries each thread caches per KV block.
    page_entry_per_thread: Int32
    # Elements moved per 128-bit async copy instruction.
    async_copy_elems: Int32
    gmem_tiled_copy_KV: cute.TiledCopy
    gmem_thr_copy_KV: cute.TiledCopy
    # Per-thread register caches of page index / in-page offset.
    tPrPage: cute.Tensor
    tPrPageOffset: cute.Tensor
    # Head-dim out-of-bounds predicates for the K and V copies.
    tKpK: cute.Tensor
    tVpV: cute.Tensor
    @staticmethod
    def create(
        mPageTable: cute.Tensor,
        mK_paged: cute.Tensor,
        mV_paged: cute.Tensor,
        page_size_divmod: FastDivmodDivisor,
        bidb: Int32,
        bidh: Int32,
        thread_idx: Int32,
        seqlen_k: Int32,
        leftpad_k: Int32,
        n_block_size: cutlass.Constexpr[Int32],
        head_dim_padded: cutlass.Constexpr[Int32],
        head_dim_v_padded: cutlass.Constexpr[Int32],
        num_threads: cutlass.Constexpr[Int32],
        dtype: Type[cutlass.Numeric],
    ):
        """Build the manager: set up the tiled G2S copy, register caches, and
        head-dim predicates, and narrow the global tensors to (bidb, bidh)."""
        # 128-bit vectorized copies; elements per copy depend on dtype width.
        universal_copy_bits = 128
        async_copy_elems = universal_copy_bits // dtype.width
        dtype_bytes = dtype.width // 8
        # Largest per-row chunk (in elements) that divides both head dims and
        # a 128-byte segment.
        gmem_k_block_size = math.gcd(
            head_dim_padded,
            head_dim_v_padded,
            128 // dtype_bytes,
        )
        assert gmem_k_block_size % async_copy_elems == 0
        gmem_threads_per_row = gmem_k_block_size // async_copy_elems
        assert cute.arch.WARP_SIZE % gmem_threads_per_row == 0
        # cp.async gmem->smem atom, bypassing L1 via the GLOBAL cache mode.
        atom_async_copy = cute.make_copy_atom(
            cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.GLOBAL),
            dtype,
            num_bits_per_copy=universal_copy_bits,
        )
        # Thread layout: gmem_threads_per_row threads walk one row together.
        thr_layout = cute.make_ordered_layout(
            (num_threads // gmem_threads_per_row, gmem_threads_per_row),
            order=(1, 0),
        )
        val_layout = cute.make_layout((1, async_copy_elems))
        gmem_tiled_copy_KV = cute.make_tiled_copy_tv(atom_async_copy, thr_layout, val_layout)
        gmem_thr_copy_KV = gmem_tiled_copy_KV.get_slice(thread_idx)
        page_entry_per_thread = n_block_size // num_threads
        # Register caches for this thread's (page, offset) pairs.
        tPrPage = cute.make_rmem_tensor((page_entry_per_thread,), Int32)
        tPrPageOffset = cute.make_rmem_tensor((page_entry_per_thread,), Int32)
        # Narrow the global tensors to this batch element / head up front.
        mPageTable = mPageTable[bidb, None]
        mK_paged = mK_paged[None, None, bidh, None]
        mV_paged = mV_paged[None, None, bidh, None]
        # Predicate for out-of-bounds head-dim columns of K.
        cK = cute.make_identity_tensor((n_block_size, head_dim_padded))
        tKcK = gmem_thr_copy_KV.partition_S(cK)
        tKpK = utils.predicate_k(tKcK, limit=mK_paged.shape[1])
        if const_expr(head_dim_padded == head_dim_v_padded):
            # Same padded head dim: K's predicate can be reused for V.
            tVpV = tKpK
        else:
            # NOTE: V's head dim sits on axis 0 of the sliced mV_paged (see
            # compute_X_ptr's indexing), hence limit=shape[0] vs shape[1] for K.
            cV = cute.make_identity_tensor((n_block_size, head_dim_v_padded))
            tVcV = gmem_thr_copy_KV.partition_S(cV)
            tVpV = utils.predicate_k(tVcV, limit=mV_paged.shape[0])
        return PagedKVManager(
            mPageTable,
            mK_paged,
            mV_paged,
            thread_idx,
            page_size_divmod,
            seqlen_k,
            leftpad_k,
            n_block_size,
            num_threads,
            head_dim_padded,
            head_dim_v_padded,
            gmem_threads_per_row,
            page_entry_per_thread,
            async_copy_elems,
            gmem_tiled_copy_KV,
            gmem_thr_copy_KV,
            tPrPage,
            tPrPageOffset,
            tKpK,
            tVpV,
        )
    @cute.jit
    def load_page_table(self, n_block: Int32):
        """Cache this thread's page-table entries for KV block `n_block`."""
        for i in cutlass.range(self.page_entry_per_thread, unroll=1):
            # Row-to-thread assignment; presumably chosen so load_KV can fetch
            # each row's pointer from the owning lane via shuffle — TODO confirm
            # against load_KV's tPrXPtr indexing.
            row = (
                i * self.num_threads
                + (self.thread_idx % self.gmem_threads_per_row)
                * (self.num_threads // self.gmem_threads_per_row)
                + (self.thread_idx // self.gmem_threads_per_row)
            )
            row_idx = n_block * self.n_block_size + row
            # Split the logical row (shifted by left padding) into (page, offset).
            page_idx, page_offset = divmod(row_idx + self.leftpad_k, self.page_size_divmod)
            # Valid when the row is inside the tile and inside seqlen_k; the
            # first disjunct skips the per-row check when the whole batch of
            # rows for this i is guaranteed in-tile.
            is_valid = (
                (i + 1) * self.num_threads <= self.n_block_size or row < self.n_block_size
            ) and row_idx < self.seqlen_k
            # Invalid rows read page 0; the actual copy is predicated in load_KV.
            page = self.mPageTable[page_idx] if is_valid else 0
            self.tPrPage[i] = page
            self.tPrPageOffset[i] = page_offset
    @cute.jit
    def compute_X_ptr(self, K_or_V: str):
        """Turn the cached (page, offset) pairs into raw 64-bit gmem addresses."""
        tPrXPtr = cute.make_rmem_tensor((self.page_entry_per_thread,), cutlass.Int64)
        for i in cutlass.range(self.page_entry_per_thread, unroll=1):
            page = self.tPrPage[i]
            page_offset = self.tPrPageOffset[i]
            # K and V are laid out differently: K indexes (offset, hdim, page),
            # V indexes (hdim, offset, page).
            if const_expr(K_or_V == "K"):
                tPrXPtr[i] = utils.elem_pointer(self.mK_paged, (page_offset, 0, page)).toint()
            else:
                tPrXPtr[i] = utils.elem_pointer(self.mV_paged, (0, page_offset, page)).toint()
        return tPrXPtr
    @cute.jit
    def load_KV(self, n_block: Int32, sX: cute.Tensor, K_or_V: str):
        """Copy one K or V tile of block `n_block` from paged gmem into smem `sX`."""
        assert K_or_V in ("K", "V")
        tPrXPtr = self.compute_X_ptr(K_or_V)
        # Finesse sX layout to be (M, N).
        sX_pi = cute.make_tensor(
            sX.iterator,
            cute.make_layout(
                (sX.shape[0][0], (sX.shape[0][1], sX.shape[2])),
                stride=(sX.stride[0][0], (sX.stride[0][1], sX.stride[2])),
            ),
        )
        if const_expr(K_or_V == "V"):
            # Need to transpose V
            sX_pi = cute.make_tensor(sX_pi.iterator, cute.select(sX_pi.layout, mode=[1, 0]))
        head_dim = self.head_dim_v_padded if const_expr(K_or_V == "V") else self.head_dim_padded
        cX = cute.make_identity_tensor((self.n_block_size, head_dim))
        tXsX = self.gmem_thr_copy_KV.partition_D(sX_pi)
        tXcX = self.gmem_thr_copy_KV.partition_S(cX)
        # Coordinates as seen by thread 0, used for thread-independent row ids.
        tXc0X = self.gmem_thr_copy_KV.get_slice(0).partition_S(cX)
        # Rows at or past this limit are out of the sequence; a negative
        # n_block disables every row.
        seqlenk_row_limit = (
            self.seqlen_k - n_block * self.n_block_size - tXcX[0][0] if n_block >= 0 else 0
        )
        for m in cutlass.range_constexpr(cute.size(tXsX, mode=[1])):
            row_valid = tXc0X[0, m, 0][0] < seqlenk_row_limit
            should_load = cute.make_fragment_like(tXsX[(0, None), m, 0], cute.Boolean)
            should_load.fill(row_valid)
            # Fetch this row's base pointer from the lane that cached it.
            x_ptr_i64 = utils.shuffle_sync(
                tPrXPtr[m // self.gmem_threads_per_row],
                m % self.gmem_threads_per_row,
                width=self.gmem_threads_per_row,
            )
            x_gmem_ptr = cute.make_ptr(
                self.mK_paged.element_type, x_ptr_i64, cute.AddressSpace.gmem, assumed_align=16
            )
            # View the row as head_dim elements, chunked per async copy.
            mX_paged_cur = cute.make_tensor(x_gmem_ptr, cute.make_layout((head_dim,)))
            mX_paged_cur_copy = cute.tiled_divide(mX_paged_cur, (self.async_copy_elems,))
            for k in cutlass.range_constexpr(cute.size(tXsX, mode=[2])):
                ki = tXcX[0, 0, k][1] // self.async_copy_elems
                mX_paged_cur_copy_ki = mX_paged_cur_copy[None, ki]
                tXsX_k = tXsX[None, m, k]
                mX_paged_cur_copy_ki = cute.make_tensor(
                    mX_paged_cur_copy_ki.iterator, tXsX_k.layout
                )
                # Predicated copy: rows past seqlen_k are skipped entirely.
                cute.copy(
                    self.gmem_tiled_copy_KV,
                    mX_paged_cur_copy_ki,
                    tXsX_k,
                    pred=should_load,
                )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/paged_kv.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/compute_block_sparsity.py | from functools import partial
from typing import Callable, Optional, Tuple
import cutlass
import cutlass.cute as cute
import torch
from cutlass import Boolean, Int8, Int32, const_expr
from flash_attn.cute.block_sparsity import (
BlockSparseTensors,
BlockSparseTensorsTorch,
to_cute_block_sparse_tensors,
)
from flash_attn.cute.utils import hash_callable, scalar_to_ssa, ssa_to_scalar
from flash_attn.cute.seqlen_info import SeqlenInfoQK
class BlockSparsityKernel:
    """Block sparsity kernel for FlexAttention.
    This kernel computes `mask_mod` for every token of each block
    to determine if an n block is full, masked, or neither.
    Writes block counts and indices to a BlockSparseTensors object.
    When use_fast_sampling=True, uses 5-point sampling (4 corners + center)
    which is much faster but only suitable for masks where this is sufficient.
    TODO:
    - optimize mask_mod evaluation
    - varlen support
    - transposed tensors for bwd pass
    """
    def __init__(
        self,
        mask_mod: Callable,
        tile_mn: Tuple[int, int],
        compute_full_blocks: bool = True,
        use_aux_tensors: bool = False,
        use_fast_sampling: bool = False,
    ):
        # mask_mod(b, h, q_idx, kv_idx, seqlen, aux) -> Boolean; True = unmasked.
        self.mask_mod = mask_mod
        # (tile_m, tile_n) block shape used for classification.
        self.tile_mn = tile_mn
        self.compute_full_blocks = compute_full_blocks
        self.use_aux_tensors = use_aux_tensors
        self.use_fast_sampling = use_fast_sampling
    @cute.jit
    def __call__(
        self,
        blocksparse_tensors: BlockSparseTensors,
        seqlen_q: Int32,
        seqlen_k: Int32,
        aux_tensors: Optional[list] = None,
    ):
        """Launch the kernel: one CTA per (m_block, head, batch) triple."""
        self.mask_cnt, self.mask_idx, self.full_cnt, self.full_idx = blocksparse_tensors
        if const_expr(self.compute_full_blocks):
            assert self.full_cnt is not None and self.full_idx is not None, (
                "full block tensors must be provided when computing full blocks"
            )
        batch_size, num_heads, num_m_blocks, num_n_blocks = self.mask_idx.shape
        # launch 1 CTA per m block
        grid = [num_m_blocks, num_heads, batch_size]
        if const_expr(self.use_fast_sampling):
            # Fast path needs only the 5 sampling threads.
            num_threads = 5
            self.num_warps = 1
        else:
            # One thread per row of the m tile.
            num_threads = self.tile_mn[0]
            self.num_warps = (num_threads + 32 - 1) // 32
        self.kernel(
            self.mask_cnt,
            self.mask_idx,
            self.full_cnt,
            self.full_idx,
            num_n_blocks,
            seqlen_q,
            seqlen_k,
            aux_tensors,
        ).launch(grid=grid, block=[num_threads, 1, 1])
    @cute.kernel
    def kernel(
        self,
        mask_cnt: cute.Tensor,
        mask_idx: cute.Tensor,
        full_cnt: cute.Tensor,
        full_idx: cute.Tensor,
        num_n_blocks: Int32,
        seqlen_q: Int32,
        seqlen_k: Int32,
        aux_tensors: Optional[list] = None,
    ):
        """Classify every n block of this CTA's m block as partial/full/skipped
        and append its index to the corresponding output list."""
        tidx, _, _ = cute.arch.thread_idx()
        warp_idx = cute.arch.warp_idx()
        lane_id = cute.arch.lane_idx()
        m_block, head_idx, batch_idx = cute.arch.block_idx()
        # Helper wrapping scalars into the SSA form mask_mod expects.
        ssa = partial(scalar_to_ssa, dtype=Int32)
        seqlen = SeqlenInfoQK.create(
            batch_idx,
            seqlen_q,
            seqlen_k,
            mCuSeqlensQ=None,
            mCuSeqlensK=None,
            mSeqUsedQ=None,
            mSeqUsedK=None,
        )
        # Per-warp ballot results (has_unmasked, has_masked) for the slow path;
        # thread 0 OR-reduces them across warps.
        @cute.struct
        class SharedStorage:
            reduction_buffer_smem: cute.struct.Align[
                cute.struct.MemRange[cutlass.Int8, 2 * self.num_warps], 1024
            ]
        smem = cutlass.utils.SmemAllocator()
        storage = smem.allocate(SharedStorage, 16)
        reduction_buffer = storage.reduction_buffer_smem.get_tensor(
            cute.make_layout((self.num_warps, 2))
        )
        # Running counts live in thread 0's registers across the n_block loop.
        num_mask_blocks = Int32(0)
        num_full_blocks = Int32(0)
        for n_block in cutlass.range(num_n_blocks, unroll_full=True):
            m_base = m_block * self.tile_mn[0]
            n_base = n_block * self.tile_mn[1]
            if const_expr(self.use_fast_sampling):
                # Fast path: 5-point sampling (4 corners + center)
                # Clamps OOB indices to nearest in bounds.
                thread_result = Boolean(False)
                thread_is_valid = Boolean(False)
                q_idx = Int32(0)
                kv_idx = Int32(0)
                if tidx == 0:
                    # Top-left corner (0, 0); always in bounds
                    q_idx = m_base
                    kv_idx = n_base
                elif tidx == 1:
                    # Top-right corner
                    q_idx = m_base
                    kv_idx = cutlass.min(n_base + self.tile_mn[1] - 1, seqlen_k - 1)
                elif tidx == 2:
                    # Bottom-left corner
                    q_idx = cutlass.min(m_base + self.tile_mn[0] - 1, seqlen_q - 1)
                    kv_idx = n_base
                elif tidx == 3:
                    # Bottom-right corner
                    q_idx = cutlass.min(m_base + self.tile_mn[0] - 1, seqlen_q - 1)
                    kv_idx = cutlass.min(n_base + self.tile_mn[1] - 1, seqlen_k - 1)
                elif tidx == 4:
                    # Center point
                    q_idx = m_base + (cutlass.min(seqlen_q - m_base, self.tile_mn[0])) // 2
                    kv_idx = n_base + (cutlass.min(seqlen_k - n_base, self.tile_mn[1])) // 2
                else:
                    thread_is_valid = Boolean(False)
                # Check bounds and determine if this thread has a valid index pair
                if tidx < 5 and q_idx < seqlen_q and kv_idx < seqlen_k:
                    thread_is_valid = Boolean(True)
                    q_idx_ssa = ssa(q_idx)
                    kv_idx_ssa = ssa(kv_idx)
                    thread_result = ssa_to_scalar(
                        self.mask_mod(
                            ssa(batch_idx),
                            ssa(head_idx),
                            q_idx_ssa,
                            kv_idx_ssa,
                            seqlen,
                            aux_tensors,
                        )
                    )
                else:
                    thread_is_valid = Boolean(False)
                # Use vote_any_sync to see if any valid thread found unmasked or masked
                # Only count results from threads that checked valid indices
                has_unmasked = cute.arch.vote_any_sync(thread_result & thread_is_valid)
                has_masked = cute.arch.vote_any_sync((Boolean(not thread_result)) & thread_is_valid)
            else:
                # Full path: check all elements in the block
                # Track if this thread's row has any masked or unmasked elements
                thread_has_unmasked = Boolean(False)
                thread_has_masked = Boolean(False)
                thread_is_valid = Boolean(False)
                # Each thread handles 1 row
                q_idx = m_base + tidx
                kv_idx = Int32(0)
                if tidx < self.tile_mn[0] and q_idx < seqlen_q:
                    thread_is_valid = Boolean(True)
                    q_idx_ssa = ssa(q_idx)
                    # Loop over all columns in this row
                    for c in cutlass.range(self.tile_mn[1], unroll_full=True):
                        kv_idx = n_base + c
                        kv_idx_ssa = ssa(kv_idx)
                        # Only check elements within valid sequence bounds
                        if kv_idx < seqlen_k:
                            # Direct scalar call
                            mask_val = ssa_to_scalar(
                                self.mask_mod(
                                    ssa(batch_idx),
                                    ssa(head_idx),
                                    q_idx_ssa,
                                    kv_idx_ssa,
                                    seqlen,
                                    aux_tensors,
                                )
                            )
                            # Update tracking flags
                            if mask_val:
                                thread_has_unmasked = Boolean(True)
                            else:
                                thread_has_masked = Boolean(True)
                # Block-level reduction to combine results across all threads
                # Only count votes from threads that checked valid indices
                warp_has_unmasked_mask = cute.arch.vote_any_sync(
                    thread_has_unmasked & thread_is_valid
                )
                warp_has_masked_mask = cute.arch.vote_any_sync(thread_has_masked & thread_is_valid)
                # lane 0 writes the ballot mask to shared memory
                # NOTE(review): shadows the cute.arch.lane_idx() value computed
                # above; both should agree — confirm before relying on either.
                lane_id = tidx % 32
                if lane_id == 0:
                    # Store as Int8
                    reduction_buffer[warp_idx, 0] = Int8(1) if warp_has_unmasked_mask else Int8(0)
                    reduction_buffer[warp_idx, 1] = Int8(1) if warp_has_masked_mask else Int8(0)
                cute.arch.sync_threads()
                # Thread 0 ORs all warp results together
                has_unmasked = Boolean(False)
                has_masked = Boolean(False)
                if tidx == 0:
                    for w in cutlass.range(self.num_warps):
                        if reduction_buffer[w, 0]:
                            has_unmasked = Boolean(True)
                        if reduction_buffer[w, 1]:
                            has_masked = Boolean(True)
            # Only thread 0 updates the output arrays (common to both paths)
            if tidx == 0:
                # Block classification based on what we found:
                # - If has_masked and has_unmasked: partial block (needs masking)
                # - If only has_unmasked: full block (no masking needed)
                # - If only has_masked: skip this block entirely
                is_partial = Boolean(has_masked and has_unmasked)
                is_full = Boolean(has_unmasked and (not has_masked))
                if is_partial:
                    mask_idx[batch_idx, head_idx, m_block, num_mask_blocks] = n_block
                    num_mask_blocks += 1
                elif is_full and const_expr(self.compute_full_blocks):
                    full_idx[batch_idx, head_idx, m_block, num_full_blocks] = n_block
                    num_full_blocks += 1
        # Only thread 0 writes back the counts
        if tidx == 0:
            mask_cnt[batch_idx, head_idx, m_block] = num_mask_blocks
            if const_expr(self.compute_full_blocks):
                full_cnt[batch_idx, head_idx, m_block] = num_full_blocks
def compute_block_sparsity(
    tile_m,
    tile_n,
    batch_size,
    num_heads,
    seqlen_q,
    seqlen_k,
    mask_mod: Callable,
    aux_tensors: Optional[list],  # list[cute.Tensor]
    device,
    compute_full_blocks: bool = True,
    use_fast_sampling: bool = False,
) -> Tuple[BlockSparseTensors, BlockSparseTensorsTorch]:
    """
    Computes block sparsity for a given `mask_mod`.
    Args:
        tile_m: The tile size for the m dimension.
        tile_n: The tile size for the n dimension.
        batch_size: The batch size.
        num_heads: The number of heads.
        seqlen_q: The sequence length for the query.
        seqlen_k: The sequence length for the key.
        mask_mod: The `mask_mod` callable to use.
        aux_tensors: A list of auxiliary tensors.
        device: The device to use.
        compute_full_blocks: Whether to compute full blocks. If False, only partially-masked blocks are computed.
        use_fast_sampling: Whether to use 5-point sampling (4 corners + center). This is much faster, but only suitable for masks where this check is sufficient.
    Returns:
        A tuple of `BlockSparseTensors` and `BlockSparseTensorsTorch`.
    """
    # A mask_mod may declare itself safe for 5-point sampling; the attribute,
    # when present, overrides the caller's flag.
    use_fast_sampling = getattr(mask_mod, "use_fast_sampling", use_fast_sampling)
    # Ceil-division: number of tiles along each sequence dimension.
    m_blocks = -(-seqlen_q // tile_m)
    n_blocks = -(-seqlen_k // tile_n)

    def _zeros(shape):
        # All output tensors are int32 counters/indices on the target device.
        return torch.zeros(shape, device=device, dtype=torch.int32)

    cnt_shape = (batch_size, num_heads, m_blocks)
    idx_shape = (batch_size, num_heads, m_blocks, n_blocks)
    mask_cnt = _zeros(cnt_shape)
    mask_idx = _zeros(idx_shape)
    if compute_full_blocks:
        full_cnt, full_idx = _zeros(cnt_shape), _zeros(idx_shape)
    else:
        full_cnt = full_idx = None
    torch_tensors = BlockSparseTensorsTorch(
        mask_block_cnt=mask_cnt,
        mask_block_idx=mask_idx,
        full_block_cnt=full_cnt,
        full_block_idx=full_idx,
        block_size=(tile_m, tile_n),
    )
    cute_tensors = to_cute_block_sparse_tensors(torch_tensors, enable_tvm_ffi=True)
    # The compiled kernel is cached on everything that affects codegen; the
    # mask_mod itself participates via a content hash.
    key = (
        tile_m,
        tile_n,
        hash_callable(mask_mod),
        compute_full_blocks,
        aux_tensors is not None,
        use_fast_sampling,
    )
    cache = compute_block_sparsity.compile_cache
    if key not in cache:
        cache[key] = cute.compile(
            BlockSparsityKernel(
                mask_mod,
                tile_mn=(tile_m, tile_n),
                compute_full_blocks=compute_full_blocks,
                use_aux_tensors=aux_tensors is not None,
                use_fast_sampling=use_fast_sampling,
            ),
            cute_tensors,
            seqlen_q,
            seqlen_k,
            aux_tensors,
            options="--enable-tvm-ffi",
        )
    # Launch with the raw torch tensors (first four fields of the named tuple).
    cache[key](torch_tensors[:4], seqlen_q, seqlen_k, aux_tensors)
    return cute_tensors, torch_tensors


compute_block_sparsity.compile_cache = {}
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/compute_block_sparsity.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:tests/cute/test_block_sparsity.py | """Tests for block sparsity computation in flash attention."""
import pytest
import torch
from torch.nn.attention.flex_attention import create_block_mask
from mask_mod_definitions import get_mask_pair
from flash_attn.cute.compute_block_sparsity import compute_block_sparsity
def _call_compute_block_sparsity(
    batch_size,
    nheads,
    seqlen_q,
    seqlen_k,
    tile_m,
    tile_n,
    mask_name,
    window_size=None,
    aux_tensors=None,
    use_fast_sampling=False,
):
    """Run compute_block_sparsity for a named mask and return the four torch
    result tensors (mask cnt/idx, full cnt/idx)."""
    mask_mod, _ = get_mask_pair(
        mask_name, seqlen_q=seqlen_q, seqlen_k=seqlen_k, window_size=window_size
    )
    _, tensors = compute_block_sparsity(
        tile_m=tile_m,
        tile_n=tile_n,
        batch_size=batch_size,
        num_heads=nheads,
        seqlen_q=seqlen_q,
        seqlen_k=seqlen_k,
        mask_mod=mask_mod,
        aux_tensors=aux_tensors,
        device="cuda",
        use_fast_sampling=use_fast_sampling,
    )
    return tensors[0], tensors[1], tensors[2], tensors[3]
def _compare_block_sparsity(
mask_block_cnt,
mask_block_idx,
full_block_cnt,
full_block_idx,
mask_block_cnt_ref,
mask_block_idx_ref,
full_block_cnt_ref,
full_block_idx_ref,
batch_size,
nheads,
seqlen_q,
seqlen_k,
tile_m,
tile_n,
):
"""Compare block sparsity against reference, handling boundary block semantics.
PyTorch treats OOB regions as masked, so boundary blocks with all in-bounds
elements unmasked appear as "partial" in PyTorch but "full" in CuTe.
This applies to BOTH boundary m_blocks (OOB q_idx) and boundary n_blocks (OOB kv_idx).
"""
if not isinstance(mask_block_cnt, torch.Tensor):
return False, f"mask_block_cnt is not a tensor: {type(mask_block_cnt)}"
n_blocks_q = mask_block_cnt.shape[2]
# Identify boundary blocks
last_m_block = (seqlen_q - 1) // tile_m
last_n_block = (seqlen_k - 1) // tile_n
m_is_boundary = seqlen_q % tile_m != 0
n_is_boundary = seqlen_k % tile_n != 0
def is_boundary_n_block(n_block):
return n_is_boundary and n_block == last_n_block
def is_boundary_m_block(m_block):
return m_is_boundary and m_block == last_m_block
for b in range(batch_size):
for h in range(nheads):
for m in range(n_blocks_q):
cute_mask_cnt = mask_block_cnt[b, h, m].item()
cute_full_cnt = full_block_cnt[b, h, m].item()
ref_mask_cnt = mask_block_cnt_ref[b, h, m].item()
ref_full_cnt = full_block_cnt_ref[b, h, m].item()
cute_mask_set = set(mask_block_idx[b, h, m, :cute_mask_cnt].tolist())
cute_full_set = set(full_block_idx[b, h, m, :cute_full_cnt].tolist())
ref_mask_set = set(mask_block_idx_ref[b, h, m, :ref_mask_cnt].tolist())
ref_full_set = set(full_block_idx_ref[b, h, m, :ref_full_cnt].tolist())
# A block is "boundary-affected" if EITHER the m_block OR n_block is at boundary
def is_boundary_affected(n_block):
return is_boundary_m_block(m) or is_boundary_n_block(n_block)
# Blocks that are full in CuTe but not in ref
full_in_cute_not_ref = cute_full_set - ref_full_set
for n_block in full_in_cute_not_ref:
if not is_boundary_affected(n_block):
return False, (
f"Non-boundary block mismatch at [{b},{h},{m}]: "
f"n_block {n_block} is full in CuTe but not in ref"
)
# Boundary-affected: CuTe says full, ref should say partial
if n_block not in ref_mask_set:
# Check if ref skipped it entirely (all masked)
# This is valid for boundary blocks
pass
# Blocks that are partial in CuTe but full in ref (would be a bug)
partial_in_cute_full_in_ref = cute_mask_set & ref_full_set
if partial_in_cute_full_in_ref:
return False, (
f"Block mismatch at [{b},{h},{m}]: "
f"n_blocks {sorted(partial_in_cute_full_in_ref)} are partial in CuTe but full in ref"
)
# Check non-boundary blocks match exactly
non_boundary_cute_full = {
n for n in cute_full_set if not is_boundary_affected(n)
}
non_boundary_ref_full = {
n for n in ref_full_set if not is_boundary_affected(n)
}
if non_boundary_cute_full != non_boundary_ref_full:
return False, (
f"Non-boundary full block mismatch at [{b},{h},{m}]: "
f"CuTe={sorted(non_boundary_cute_full)}, ref={sorted(non_boundary_ref_full)}"
)
non_boundary_cute_mask = {
n for n in cute_mask_set if not is_boundary_affected(n)
}
non_boundary_ref_mask = {
n for n in ref_mask_set if not is_boundary_affected(n)
}
if non_boundary_cute_mask != non_boundary_ref_mask:
return False, (
f"Non-boundary partial block mismatch at [{b},{h},{m}]: "
f"CuTe={sorted(non_boundary_cute_mask)}, ref={sorted(non_boundary_ref_mask)}"
)
return True, ""
# Test configurations
# (seqlen_q, seqlen_k) pairs exercised by the parametrized tests below.
SEQLEN_PAIRS = [
    # Small aligned
    (64, 64),
    (128, 128),
    (256, 256),
    (512, 512),
    # Rectangular
    (128, 256),
    (256, 128),
    (512, 256),
    (256, 512),
    # Large aligned
    (1024, 1024),
    (2048, 2048),
    (4096, 4096),
    (8192, 8192),
    # Large unaligned
    (1000, 1000),
    (2000, 2000),
    (4000, 4000),
    # Edge cases with unaligned seqlens
    (113, 203),
    (127, 127),
    (129, 129),
    (255, 255),
    (257, 257),
    (1023, 1023),
    (1025, 1025),
    (2047, 2047),
    (2049, 2049),
]
# (tile_m, tile_n) block shapes exercised by the parametrized tests below.
TILE_SIZES = [
    # Standard powers of 2
    (32, 32),
    (64, 64),
    (128, 128),
    (256, 256),
    # Rectangular
    (32, 64),
    (64, 32),
    (64, 128),
    (128, 64),
    (128, 256),
    (256, 128),
    # Unusual sizes
    (40, 40),
    (48, 48),
    (96, 96),
    (112, 112),
    (32, 128),
    (128, 32),
    (40, 96),
    (96, 40),
]
@pytest.mark.parametrize("seqlen_q,seqlen_k", SEQLEN_PAIRS)
@pytest.mark.parametrize("tile_m,tile_n", TILE_SIZES)
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("nheads", [1, 4])
@pytest.mark.parametrize("mask_name", ["block_diagonal", "mini_causal"])
def test_fixed_length_masks(
    seqlen_q, seqlen_k, tile_m, tile_n, batch_size, nheads, mask_name
):
    """Test fixed-length masks against torch's create_block_mask reference."""
    # Block sparsity computed by the CuTe kernel under test.
    # (Fix: removed unused `seqlen_unaligned` local.)
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = (
        _call_compute_block_sparsity(
            batch_size,
            nheads,
            seqlen_q,
            seqlen_k,
            tile_m,
            tile_n,
            mask_name,
            use_fast_sampling=False,
        )
    )
    # Reference block mask from PyTorch flex attention, same tiling.
    _, mask_mod_flex = get_mask_pair(mask_name)
    block_mask = create_block_mask(
        mask_mod_flex,
        B=batch_size,
        H=nheads,
        Q_LEN=seqlen_q,
        KV_LEN=seqlen_k,
        device="cuda",
        BLOCK_SIZE=(tile_m, tile_n),
    )
    (
        _,
        _,
        mask_block_cnt_ref,
        mask_block_idx_ref,
        full_block_cnt_ref,
        full_block_idx_ref,
        *_,
    ) = block_mask.as_tuple()
    # Dump both sides; pytest only shows this output when the test fails.
    print("CuTe results:")
    print(f" mask_block_cnt: {mask_block_cnt}")
    print(f" full_block_cnt: {full_block_cnt}")
    print(f" mask_block_idx: {mask_block_idx}")
    print(f" full_block_idx: {full_block_idx}")
    print("Torch results:")
    print(f" mask_block_cnt: {mask_block_cnt_ref}")
    print(f" full_block_cnt: {full_block_cnt_ref}")
    print(f" mask_block_idx: {mask_block_idx_ref}")
    print(f" full_block_idx: {full_block_idx_ref}")
    all_match, error_msg = _compare_block_sparsity(
        mask_block_cnt,
        mask_block_idx,
        full_block_cnt,
        full_block_idx,
        mask_block_cnt_ref,
        mask_block_idx_ref,
        full_block_cnt_ref,
        full_block_idx_ref,
        batch_size,
        nheads,
        seqlen_q,
        seqlen_k,
        tile_m,
        tile_n,
    )
    assert all_match, f"Mismatch: {error_msg}"
@pytest.mark.parametrize("seqlen_q,seqlen_k", SEQLEN_PAIRS)
@pytest.mark.parametrize(
    "tile_m,tile_n", [(64, 64), (128, 128), (64, 128), (128, 64), (256, 256)]
)
@pytest.mark.parametrize("batch_size", [1])
@pytest.mark.parametrize("nheads", [1, 4])
@pytest.mark.parametrize(
    "mask_name,window_size",
    [("causal", None), ("sliding_window", 64), ("sliding_window", 256)],
)
def test_parameterized_masks(
    seqlen_q, seqlen_k, tile_m, tile_n, batch_size, nheads, mask_name, window_size
):
    """Check masks that take runtime parameters (causal / sliding window)."""
    if mask_name == "sliding_window" and seqlen_q > seqlen_k:
        pytest.skip("Sliding window not supported for seqlen_q > seqlen_k")
    # CuTe result under test: (mask cnt, mask idx, full cnt, full idx).
    cute_result = _call_compute_block_sparsity(
        batch_size,
        nheads,
        seqlen_q,
        seqlen_k,
        tile_m,
        tile_n,
        mask_name,
        window_size=window_size,
    )
    # Reference from PyTorch flex attention with the same parameters.
    _, flex_mask = get_mask_pair(
        mask_name, seqlen_q=seqlen_q, seqlen_k=seqlen_k, window_size=window_size
    )
    ref = create_block_mask(
        flex_mask,
        B=batch_size,
        H=nheads,
        Q_LEN=seqlen_q,
        KV_LEN=seqlen_k,
        device="cuda",
        BLOCK_SIZE=(tile_m, tile_n),
    ).as_tuple()
    # as_tuple() slots 2..5 hold mask cnt/idx and full cnt/idx.
    ok, why = _compare_block_sparsity(
        *cute_result,
        ref[2],
        ref[3],
        ref[4],
        ref[5],
        batch_size,
        nheads,
        seqlen_q,
        seqlen_k,
        tile_m,
        tile_n,
    )
    assert ok, f"Mismatch: {why}"
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k,tile_m,tile_n",
    [
        (1, 1, 64, 64),
        (63, 63, 64, 64),
        (65, 65, 64, 64),
        (129, 129, 128, 128),
        (100, 200, 64, 128),
    ],
)
def test_edge_cases(seqlen_q, seqlen_k, tile_m, tile_n):
    """Test edge cases with unaligned dimensions."""
    batch_size, nheads = 1, 1
    # CuTe result under test. (Fix: removed unused `seqlen_unaligned` local.)
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = (
        _call_compute_block_sparsity(
            batch_size,
            nheads,
            seqlen_q,
            seqlen_k,
            tile_m,
            tile_n,
            "causal",
        )
    )
    # Reference block mask from PyTorch flex attention.
    _, mask_mod_flex = get_mask_pair("causal", seqlen_q=seqlen_q, seqlen_k=seqlen_k)
    block_mask = create_block_mask(
        mask_mod_flex,
        B=batch_size,
        H=nheads,
        Q_LEN=seqlen_q,
        KV_LEN=seqlen_k,
        device="cuda",
        BLOCK_SIZE=(tile_m, tile_n),
    )
    (
        _,
        _,
        mask_block_cnt_ref,
        mask_block_idx_ref,
        full_block_cnt_ref,
        full_block_idx_ref,
        *_,
    ) = block_mask.as_tuple()
    all_match, error_msg = _compare_block_sparsity(
        mask_block_cnt,
        mask_block_idx,
        full_block_cnt,
        full_block_idx,
        mask_block_cnt_ref,
        mask_block_idx_ref,
        full_block_cnt_ref,
        full_block_idx_ref,
        batch_size,
        nheads,
        seqlen_q,
        seqlen_k,
        tile_m,
        tile_n,
    )
    assert all_match, f"Mismatch: {error_msg}"
@pytest.mark.parametrize("seqlen_q,seqlen_k", SEQLEN_PAIRS)
@pytest.mark.parametrize(
    "tile_m,tile_n", [(64, 64), (128, 128), (64, 128), (128, 64), (256, 256)]
)
@pytest.mark.parametrize("nheads", [1, 4])
@pytest.mark.parametrize("mask_name", ["causal", "block_diagonal"])
def test_fast_sampling(seqlen_q, seqlen_k, tile_m, tile_n, nheads, mask_name):
    """Test fast sampling mode (5-point sampling)."""
    batch_size = 1
    # CuTe result with 5-point sampling enabled.
    # (Fix: removed unused `seqlen_unaligned` local.)
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = (
        _call_compute_block_sparsity(
            batch_size,
            nheads,
            seqlen_q,
            seqlen_k,
            tile_m,
            tile_n,
            mask_name,
            use_fast_sampling=True,
        )
    )
    # Reference block mask from PyTorch flex attention (exhaustive check).
    _, mask_mod_flex = get_mask_pair(mask_name, seqlen_q=seqlen_q, seqlen_k=seqlen_k)
    block_mask = create_block_mask(
        mask_mod_flex,
        B=batch_size,
        H=nheads,
        Q_LEN=seqlen_q,
        KV_LEN=seqlen_k,
        device="cuda",
        BLOCK_SIZE=(tile_m, tile_n),
    )
    (
        _,
        _,
        mask_block_cnt_ref,
        mask_block_idx_ref,
        full_block_cnt_ref,
        full_block_idx_ref,
        *_,
    ) = block_mask.as_tuple()
    all_match, error_msg = _compare_block_sparsity(
        mask_block_cnt,
        mask_block_idx,
        full_block_cnt,
        full_block_idx,
        mask_block_cnt_ref,
        mask_block_idx_ref,
        full_block_cnt_ref,
        full_block_idx_ref,
        batch_size,
        nheads,
        seqlen_q,
        seqlen_k,
        tile_m,
        tile_n,
    )
    assert all_match, f"Mismatch: {error_msg}"
# Allow running this file directly (python test_block_sparsity.py) without
# invoking pytest on the command line.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_block_sparsity.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 437,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:flash_attn/cute/block_sparse_utils.py | """
Block-sparse runtime utilities for CUTE DSL kernels.
This module contains runtime execution functions for block-sparse attention kernels.
These utilities are used by CUTE DSL kernels to produce and consume block-sparse loads.
"""
from typing import Callable, Optional
from functools import partial
import math
import cutlass
import cutlass.cute as cute
from cutlass import Float32, Int32, const_expr
from quack import copy_utils
# Import data structures from block_sparsity
from flash_attn.cute.block_sparsity import BlockSparseTensors
from flash_attn.cute.named_barrier import NamedBarrierBwd
# NOTE [SM100 block-sparse empty tiles: mbarrier contract]
#
# For block-sparse SM100 forward, a given (m_block, stage) Q tile can have zero active
# KV blocks (total_block_cnt == 0). In that case there is no seqlen_kv iteration, so
# the softmax warp-group has no row stats to publish.
#
# The correction warp-group seeds fully-masked-row stats and runs the usual correction
# epilogue so output/LSE have well-defined values. Both warp-groups must still perform
# the softmax<->correction mbarrier handshake so phases advance correctly across
# empty->empty and empty->non-empty tile sequences.
#
# In the no-sink case, this corresponds to the usual fully-masked-row convention:
# output is zero and LSE is -inf.
#
# Barrier contract (each is `mbar_ptr + <offset> + stage`):
#
# Producer/consumer pairs:
# - `mbar_softmax_corr_full` : softmax arrive -> correction wait
# - `mbar_softmax_corr_empty` : correction arrive -> softmax wait
# - `mbar_P_full_O_rescaled` : softmax arrive (+ correction arrive) -> MMA wait
# - `mbar_P_full_2` : softmax arrive -> MMA wait
# - `mbar_corr_epi_full_/empty` : correction <-> epilogue (only when epilogue is separate)
#
# Empty tile (`total_block_cnt == 0`):
# - Softmax: skips the seqlen_kv softmax path entirely (no P stores, no `mbar_P_full_*`).
# It only arrives `mbar_softmax_corr_full` once per stage as a synthetic "no work" signal.
# At the `softmax_loop` level, softmax unconditionally waits `mbar_softmax_corr_empty`
# before each tile (when block-sparse) to drain a prior correction arrival and keep
# phases aligned across non-empty -> empty transitions.
# - Correction: waits `mbar_softmax_corr_full`, seeds stats + runs `correction_epilogue(scale=0)`,
# and arrives `mbar_softmax_corr_empty` (and `mbar_corr_epi_full_/empty` when applicable).
# - No `mbar_P_full_*` barriers are arrived (no P, no MMA O); only the softmax<->correction
# (and correction<->epilogue) handshakes advance phases.
#
# Non-empty tile:
# - Softmax: runs `softmax_step` (produces P) and uses `mbar_softmax_corr_full/empty` to
# publish row_max (during seqlen_kv) and final row stats (once per tile), and to advance phases;
# arrives `mbar_P_full_*` when P is stored.
# - Correction: waits `mbar_softmax_corr_full`, may rescale/release O, arrives `mbar_softmax_corr_empty`
# to ack/advance, and arrives `mbar_P_full_O_rescaled` when MMA can proceed.
#
# Backward (SM100):
# - Empty KV tile: for a given `n_block`, `total_m_block_cnt == 0` means no Q tiles contribute.
# - Both the load and compute loops guard all pipeline work on `process_tile`, so empty tiles
# skip producer/consumer operations entirely (no per-tile mbarrier phase handshake like forward).
# - In the `not dKV_postprocess` path, dK/dV for empty KV tiles are explicitly written as zeros
# even when `process_tile == False` (see `flash_bwd_sm100.py` `should_zero_dKV`).
@cute.jit
def load_block_list(
    block_indices: cute.Tensor,
    block_count,
    load_q_with_first: cutlass.Constexpr,
    first_block_preloaded: cutlass.Constexpr,
    kv_producer_state,
    load_Q,
    load_K,
    load_V,
    pipeline_k,
    pipeline_v,
    use_tma_q: cutlass.Constexpr,
    tma_q_bytes: cutlass.Constexpr,
    intra_wg_overlap: cutlass.Constexpr,
):
    """Iterate over the sparse blocks and load K, V (and Q) into the pipeline.

    Blocks are visited in reverse list order: block_indices[block_count - 1]
    down to block_indices[0].

    Without intra_wg_overlap each block acquires a K stage, loads K, acquires
    the V stage, loads V, then advances the producer state. With
    intra_wg_overlap the K load of block i is issued together with the V load
    of block i-1, so the V load of the list's last block is intentionally left
    pending for the caller (see `finish_overlap_v_load`).

    Args:
        block_indices: per-tile list of KV block indices to load.
        block_count: number of valid entries in `block_indices` (dynamic value).
        load_q_with_first: when True (and `use_tma_q`), piggyback the TMA Q
            load on the first K stage's barrier by adding `tma_q_bytes` to its
            expected transaction count.
        first_block_preloaded: set when the caller has already acquired the
            first K stage and issued its K (and possibly Q) load; only
            meaningful in the intra_wg_overlap path.
        kv_producer_state: pipeline producer state, mutated via advance().

    Returns:
        Updated kv_producer_state after processing the block list.
    """
    if block_count > 0:
        if const_expr(not intra_wg_overlap):
            # Peel first iteration: the first block may need to load Q alongside K,
            # Parameters are already Constexpr, so no need to wrap in const_expr()
            n_block_first = block_indices[block_count - 1]
            # Reserve extra transaction bytes on the K barrier so the TMA Q copy
            # completes against the same mbarrier as the first K copy.
            extra_tx = tma_q_bytes if const_expr(load_q_with_first) and const_expr(use_tma_q) else 0
            pipeline_k.producer_acquire(kv_producer_state, extra_tx_count=extra_tx)
            if const_expr(load_q_with_first and use_tma_q):
                load_Q(tma_bar_ptr=pipeline_k.producer_get_barrier(kv_producer_state))
            load_K(src_idx=n_block_first, producer_state=kv_producer_state)
            pipeline_v.producer_acquire(kv_producer_state)
            load_V(src_idx=n_block_first, producer_state=kv_producer_state)
            kv_producer_state.advance()
            for offset in cutlass.range(1, block_count):
                n_block = block_indices[block_count - 1 - offset]
                pipeline_k.producer_acquire(kv_producer_state)
                load_K(src_idx=n_block, producer_state=kv_producer_state)
                pipeline_v.producer_acquire(kv_producer_state)
                load_V(src_idx=n_block, producer_state=kv_producer_state)
                kv_producer_state.advance()
        else:
            n_block_first = block_indices[block_count - 1]
            if const_expr(not first_block_preloaded):
                extra_tx = (
                    tma_q_bytes if const_expr(load_q_with_first) and const_expr(use_tma_q) else 0
                )
                pipeline_k.producer_acquire(kv_producer_state, extra_tx_count=extra_tx)
                if const_expr(load_q_with_first and use_tma_q):
                    load_Q(tma_bar_ptr=pipeline_k.producer_get_barrier(kv_producer_state))
                load_K(src_idx=n_block_first, producer_state=kv_producer_state)
            # Software pipeline: acquire a fresh stage for the next block's K while
            # issuing the previous block's V on the stage it already owns. The
            # final V of the list is deliberately not loaded here.
            for idx in cutlass.range(block_count - 1, unroll=1):
                n_block_prev = block_indices[block_count - 1 - idx]
                n_block = block_indices[block_count - 2 - idx]
                kv_producer_state_prev = kv_producer_state.clone()
                kv_producer_state.advance()
                pipeline_k.producer_acquire(kv_producer_state)
                load_K(src_idx=n_block, producer_state=kv_producer_state)
                pipeline_v.producer_acquire(kv_producer_state_prev)
                load_V(src_idx=n_block_prev, producer_state=kv_producer_state_prev)
    return kv_producer_state
@cute.jit
def finish_overlap_v_load(
    block_indices: cute.Tensor,
    block_count,
    load_V,
    pipeline_v,
    kv_producer_state,
):
    """Drain the trailing V load left in flight by an overlapped K/V block list.

    In the intra-warp-group-overlap path, each loop iteration pairs the K load
    of the current block with the V load of the previous one, so the list's
    last visited block (index 0, since iteration is reversed) still owes its V
    copy. Issue it here and step the producer state past that stage.

    Returns:
        Updated kv_producer_state.
    """
    if block_count > 0:
        trailing_n_block = block_indices[0]
        pipeline_v.producer_acquire(kv_producer_state)
        load_V(src_idx=trailing_n_block, producer_state=kv_producer_state)
        kv_producer_state.advance()
    return kv_producer_state
@cute.jit
def sparse_tensor_m_block(
    m_block,
    qhead_per_kvhead: cutlass.Constexpr[int],
    q_subtile_factor: cutlass.Constexpr[int],
):
    """Map a (possibly packed / subtiled) m_block index to a block-sparse tensor index.

    Pack-GQA packs `qhead_per_kvhead` Q-heads into the m dimension, and Q
    subtiling splits each sparse block into `q_subtile_factor` sub-blocks;
    both divisions are emitted only when the corresponding factor is not 1.
    """
    sparse_block = m_block
    if const_expr(qhead_per_kvhead != 1):
        sparse_block = sparse_block // qhead_per_kvhead
    if const_expr(q_subtile_factor != 1):
        sparse_block = sparse_block // q_subtile_factor
    return sparse_block
@cute.jit
def produce_block_sparse_loads(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    m_block,
    kv_producer_state,
    load_Q,
    load_K,
    load_V,
    pipeline_k,
    pipeline_v,
    use_tma_q: cutlass.Constexpr,
    tma_q_bytes: cutlass.Constexpr,
    intra_wg_overlap: cutlass.Constexpr,
    qhead_per_kvhead: cutlass.Constexpr[int] = 1,
    q_subtile_factor: cutlass.Constexpr[int] = 1,
):
    """Iterate over the mask and full block lists for a single tile.

    The masked (partial) list is processed first, then the full list. Q is
    loaded together with the first K of the first non-empty list.

    The masked (partial) list may leave the last V load pending when
    intra-warp-group overlap is enabled. The first full block must consume
    that pending V while issuing its own K load on the next pipeline stage.
    In the intra-wg-overlap path, the last masked block leaves its V copy in
    flight while we advance the producer state to start the next full K.
    Either the full list overlaps that pending V load, or, if no full blocks
    exist, we explicitly drain it via `finish_overlap_v_load`.

    Args:
        qhead_per_kvhead: Pack-GQA factor. When > 1, m_block is in packed space and
            must be converted to unpacked for sparse tensor indexing.
        q_subtile_factor: Q subtiling factor used in the same conversion.

    Returns:
        Updated kv_producer_state after all loads for this tile are issued.
    """
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = blocksparse_tensors
    m_block_sparse = sparse_tensor_m_block(m_block, qhead_per_kvhead, q_subtile_factor)
    curr_mask_block_cnt = mask_block_cnt[batch_idx, head_idx, m_block_sparse]
    curr_mask_block_idx = mask_block_idx[batch_idx, head_idx, m_block_sparse, None]
    if const_expr(full_block_cnt is not None):
        curr_full_block_cnt = full_block_cnt[batch_idx, head_idx, m_block_sparse]
        curr_full_block_idx = full_block_idx[batch_idx, head_idx, m_block_sparse, None]
    else:
        # This sparse layout carries no separate full-block list; treat it as empty.
        curr_full_block_cnt = Int32(0)
        curr_full_block_idx = None
    mask_empty = curr_mask_block_cnt == 0
    full_empty = curr_full_block_cnt == 0
    if mask_empty:
        # No masked blocks: the full list owns the initial Q+K load.
        kv_producer_state = load_block_list(
            curr_full_block_idx,
            curr_full_block_cnt,
            load_q_with_first=True,
            first_block_preloaded=False,
            kv_producer_state=kv_producer_state,
            load_Q=load_Q,
            load_K=load_K,
            load_V=load_V,
            pipeline_k=pipeline_k,
            pipeline_v=pipeline_v,
            use_tma_q=use_tma_q,
            tma_q_bytes=tma_q_bytes,
            intra_wg_overlap=intra_wg_overlap,
        )
        # Overlapped loads leave the last V pending; drain it if any block ran.
        if const_expr(intra_wg_overlap) and curr_full_block_cnt > 0:
            kv_producer_state = finish_overlap_v_load(
                curr_full_block_idx,
                curr_full_block_cnt,
                load_V,
                pipeline_v,
                kv_producer_state,
            )
    else:
        # Masked blocks present: load Q together with the first masked K so consumers can
        # start immediately. When overlap is disabled this fully drains the list.
        kv_producer_state = load_block_list(
            curr_mask_block_idx,
            curr_mask_block_cnt,
            load_q_with_first=True,
            first_block_preloaded=False,
            kv_producer_state=kv_producer_state,
            load_Q=load_Q,
            load_K=load_K,
            load_V=load_V,
            pipeline_k=pipeline_k,
            pipeline_v=pipeline_v,
            use_tma_q=use_tma_q,
            tma_q_bytes=tma_q_bytes,
            intra_wg_overlap=intra_wg_overlap,
        )
        if full_empty:
            # No full list to overlap with: explicitly drain the masked list's
            # pending V load.
            if const_expr(intra_wg_overlap):
                kv_producer_state = finish_overlap_v_load(
                    curr_mask_block_idx,
                    curr_mask_block_cnt,
                    load_V,
                    pipeline_v,
                    kv_producer_state,
                )
        else:
            if const_expr(intra_wg_overlap):
                # Bridge the masked list to the full list by overlapping the pending masked V
                # with the first full K load.
                n_block_mask_last = curr_mask_block_idx[0]
                n_block_full_first = curr_full_block_idx[curr_full_block_cnt - 1]
                kv_producer_state_prev = kv_producer_state.clone()
                kv_producer_state.advance()
                pipeline_k.producer_acquire(kv_producer_state)
                load_K(src_idx=n_block_full_first, producer_state=kv_producer_state)
                pipeline_v.producer_acquire(kv_producer_state_prev)
                load_V(src_idx=n_block_mask_last, producer_state=kv_producer_state_prev)
                # Run the rest of the full list; its first K is already in flight.
                kv_producer_state = load_block_list(
                    curr_full_block_idx,
                    curr_full_block_cnt,
                    load_q_with_first=False,
                    first_block_preloaded=True,
                    kv_producer_state=kv_producer_state,
                    load_Q=load_Q,
                    load_K=load_K,
                    load_V=load_V,
                    pipeline_k=pipeline_k,
                    pipeline_v=pipeline_v,
                    use_tma_q=use_tma_q,
                    tma_q_bytes=tma_q_bytes,
                    intra_wg_overlap=intra_wg_overlap,
                )
                kv_producer_state = finish_overlap_v_load(
                    curr_full_block_idx,
                    curr_full_block_cnt,
                    load_V,
                    pipeline_v,
                    kv_producer_state,
                )
            else:
                # Non-overlap path with both lists: run the full list normally (skipping the Q
                # reload because the masked list already issued it).
                kv_producer_state = load_block_list(
                    curr_full_block_idx,
                    curr_full_block_cnt,
                    load_q_with_first=False,
                    first_block_preloaded=False,
                    kv_producer_state=kv_producer_state,
                    load_Q=load_Q,
                    load_K=load_K,
                    load_V=load_V,
                    pipeline_k=pipeline_k,
                    pipeline_v=pipeline_v,
                    use_tma_q=use_tma_q,
                    tma_q_bytes=tma_q_bytes,
                    intra_wg_overlap=intra_wg_overlap,
                )
    return kv_producer_state
@cute.jit
def consume_block_sparse_loads(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    m_block,
    seqlen,
    kv_consumer_state,
    mma_pv_fn,
    mma_one_n_block,
    process_first_half_block,
    process_last_half_block,
    mask_fn,
    score_mod_fn,
    O_should_accumulate,
    mask_mod,
    fastdiv_mods,
    intra_wg_overlap: cutlass.Constexpr,
    warp_scheduler_barrier_sync: Callable,
    warp_scheduler_barrier_arrive: Callable,
    qhead_per_kvhead: cutlass.Constexpr[int] = 1,
    q_subtile_factor: cutlass.Constexpr[int] = 1,
):
    """Consume the mask and full block lists for a single tile on the consumer side.

    Mirrors `produce_block_sparse_loads` so that the consumer pipeline uses
    the same sparse tensor indexing and visits blocks in the same reversed
    order: masked (partial) blocks first, then full blocks.

    Masked blocks apply `mask_mod`; full blocks pass `mask_mod=None` since no
    element-wise masking is needed there. The very first block processed for
    the tile additionally applies sequence-length masking (`mask_seqlen=True`)
    and, when `O_should_accumulate` is False, zero-initializes the output
    accumulator via `zero_init`.

    NOTE(review): unlike the producer, this indexes `full_block_cnt`/
    `full_block_idx` without a `None` guard — it assumes a full-block list is
    always present; confirm against callers.

    Args:
        qhead_per_kvhead: Pack-GQA factor. When > 1, m_block is in packed space and
            must be converted to unpacked for sparse tensor indexing.

    Returns:
        (kv_consumer_state, O_should_accumulate, processed_any) where
        `processed_any` is True when the tile had at least one sparse block.
    """
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = blocksparse_tensors
    m_block_sparse = sparse_tensor_m_block(m_block, qhead_per_kvhead, q_subtile_factor)
    curr_mask_block_cnt = mask_block_cnt[batch_idx, head_idx, m_block_sparse]
    curr_mask_block_idx = mask_block_idx[batch_idx, head_idx, m_block_sparse, None]
    curr_full_block_cnt = full_block_cnt[batch_idx, head_idx, m_block_sparse]
    curr_full_block_idx = full_block_idx[batch_idx, head_idx, m_block_sparse, None]
    processed_any = curr_mask_block_cnt + curr_full_block_cnt > 0
    if const_expr(not intra_wg_overlap):
        # Non-overlap path: bracket the tile's MMAs between
        # warp_scheduler_barrier_sync (before the first block) and
        # warp_scheduler_barrier_arrive (after the last block).
        if curr_mask_block_cnt > 0:
            mask_n_block = curr_mask_block_idx[curr_mask_block_cnt - 1]
            warp_scheduler_barrier_sync()
            kv_consumer_state = mma_one_n_block(
                kv_consumer_state,
                n_block=mask_n_block,
                mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                mask_fn=partial(
                    mask_fn,
                    mask_mod=mask_mod,
                    mask_seqlen=True,
                    fastdiv_mods=fastdiv_mods if cutlass.const_expr(mask_mod is not None) else None,
                ),
                is_first_n_block=True,
            )
            O_should_accumulate = True
            for i in cutlass.range(1, curr_mask_block_cnt):
                mask_n_block = curr_mask_block_idx[curr_mask_block_cnt - 1 - i]
                kv_consumer_state = mma_one_n_block(
                    kv_consumer_state,
                    n_block=mask_n_block,
                    mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                    mask_fn=partial(mask_fn, mask_mod=mask_mod, mask_seqlen=False),
                    is_first_n_block=False,
                )
                O_should_accumulate = True
            # Only arrive here when the full list won't run (it otherwise owns
            # the arrive at the end of the tile).
            if curr_full_block_cnt == 0:
                warp_scheduler_barrier_arrive()
        if curr_full_block_cnt > 0:
            full_n_block = curr_full_block_idx[curr_full_block_cnt - 1]
            if curr_mask_block_cnt == 0:
                # Full list is first for this tile: it performs the sync and
                # the seqlen-masked first block.
                warp_scheduler_barrier_sync()
                kv_consumer_state = mma_one_n_block(
                    kv_consumer_state,
                    n_block=full_n_block,
                    mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                    mask_fn=partial(mask_fn, mask_seqlen=True),
                    is_first_n_block=True,
                )
                O_should_accumulate = True
                for i in cutlass.range(1, curr_full_block_cnt):
                    full_n_block = curr_full_block_idx[curr_full_block_cnt - 1 - i]
                    kv_consumer_state = mma_one_n_block(
                        kv_consumer_state,
                        n_block=full_n_block,
                        mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                        mask_fn=partial(mask_fn, mask_seqlen=False),
                        is_first_n_block=False,
                    )
                    O_should_accumulate = True
            else:
                # Masked list already ran: the tile's first block was handled
                # above, so full blocks drop mask_mod and is_first_n_block.
                kv_consumer_state = mma_one_n_block(
                    kv_consumer_state,
                    n_block=full_n_block,
                    mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                    mask_fn=partial(mask_fn, mask_mod=None, mask_seqlen=True),
                    is_first_n_block=False,
                )
                O_should_accumulate = True
                for i in cutlass.range(1, curr_full_block_cnt):
                    full_n_block = curr_full_block_idx[curr_full_block_cnt - 1 - i]
                    kv_consumer_state = mma_one_n_block(
                        kv_consumer_state,
                        n_block=full_n_block,
                        mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                        mask_fn=partial(mask_fn, mask_mod=None, mask_seqlen=False),
                        is_first_n_block=False,
                    )
                    O_should_accumulate = True
            warp_scheduler_barrier_arrive()
    else:
        # Intra-wg-overlap path: the tile's first block runs through
        # process_first_half_block and the tile is closed by
        # process_last_half_block after all blocks.
        if curr_mask_block_cnt > 0:
            mask_n_block = curr_mask_block_idx[curr_mask_block_cnt - 1]
            kv_consumer_state = process_first_half_block(
                n_block=mask_n_block,
                seqlen=seqlen,
                kv_consumer_state=kv_consumer_state,
                mask_fn=partial(
                    mask_fn,
                    mask_mod=mask_mod,
                    mask_seqlen=True,
                    fastdiv_mods=fastdiv_mods if cutlass.const_expr(mask_mod is not None) else None,
                ),
                score_mod_fn=score_mod_fn,
                is_first_block=True,
            )
            for i in cutlass.range(1, curr_mask_block_cnt):
                mask_n_block = curr_mask_block_idx[curr_mask_block_cnt - 1 - i]
                kv_consumer_state = mma_one_n_block(
                    kv_consumer_state,
                    n_block=mask_n_block,
                    seqlen=seqlen,
                    mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                    mask_fn=partial(mask_fn, mask_mod=mask_mod, mask_seqlen=False),
                )
                O_should_accumulate = True
        if curr_full_block_cnt > 0:
            full_n_block = curr_full_block_idx[curr_full_block_cnt - 1]
            if curr_mask_block_cnt == 0:
                kv_consumer_state = process_first_half_block(
                    n_block=full_n_block,
                    seqlen=seqlen,
                    kv_consumer_state=kv_consumer_state,
                    mask_fn=partial(mask_fn, mask_mod=None, mask_seqlen=True),
                    score_mod_fn=score_mod_fn,
                    is_first_block=True,
                )
            else:
                kv_consumer_state = mma_one_n_block(
                    kv_consumer_state,
                    n_block=full_n_block,
                    seqlen=seqlen,
                    mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                    mask_fn=partial(mask_fn, mask_mod=None, mask_seqlen=True),
                )
                O_should_accumulate = True
            for i in cutlass.range(1, curr_full_block_cnt):
                full_n_block = curr_full_block_idx[curr_full_block_cnt - 1 - i]
                kv_consumer_state = mma_one_n_block(
                    kv_consumer_state,
                    n_block=full_n_block,
                    seqlen=seqlen,
                    mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                    mask_fn=partial(mask_fn, mask_mod=None, mask_seqlen=False),
                )
                O_should_accumulate = True
        # Close out the tile only if any block was processed.
        if curr_mask_block_cnt + curr_full_block_cnt > 0:
            kv_consumer_state = process_last_half_block(
                kv_consumer_state=kv_consumer_state,
                zero_init=not O_should_accumulate,
            )
            O_should_accumulate = True
    return kv_consumer_state, O_should_accumulate, processed_any
@cute.jit
def load_block_list_sm100(
    block_indices: cute.Tensor,
    block_count,
    load_q_with_first: cutlass.Constexpr,
    q_stage: cutlass.Constexpr,
    kv_producer_state,
    load_Q,
    load_K,
    load_V,
    pipeline_kv,
):
    """SM100 version of load_block_list (no intra_wg_overlap, no extra_tx_count).

    Iterates `block_indices` in reverse order, issuing K then V for each block
    on consecutive producer stages (K and V share one KV pipeline, so the
    state advances once per individual load).

    Args:
        load_q_with_first: when True, issue the Q load(s) before the first K —
            stage 0 always, stage 1 additionally when q_stage == 2.
        pipeline_kv: not referenced here; kept for signature parity. The
            pipeline barriers are handled inside the load callbacks.

    Returns:
        Updated kv_producer_state.
    """
    if block_count > 0:
        # First iteration: load Q alongside K if requested
        n_block_first = block_indices[block_count - 1]
        if const_expr(load_q_with_first):
            # SM100 loads Q0 and optionally Q1
            load_Q(block=0, stage=0)
            if const_expr(q_stage == 2):
                load_Q(block=1, stage=1)
        # SM100 doesn't use producer_acquire for pipeline_kv in load path
        # The pipeline barriers are handled inside load_KV
        load_K(block=n_block_first, producer_state=kv_producer_state, page_idx=None)
        kv_producer_state.advance()
        load_V(block=n_block_first, producer_state=kv_producer_state, page_idx=None)
        kv_producer_state.advance()
        # Remaining blocks
        for offset in cutlass.range(1, block_count):
            n_block = block_indices[block_count - 1 - offset]
            load_K(block=n_block, producer_state=kv_producer_state, page_idx=None)
            kv_producer_state.advance()
            load_V(block=n_block, producer_state=kv_producer_state, page_idx=None)
            kv_producer_state.advance()
    return kv_producer_state
# SM100-specific tile processor using SM100 helpers
@cute.jit
def produce_block_sparse_loads_sm100(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    m_block,
    kv_producer_state,
    load_Q,
    load_K,
    load_V,
    pipeline_kv,
    q_stage: cutlass.Constexpr,
    q_producer_phase: Int32,
    qhead_per_kvhead: cutlass.Constexpr,
    q_subtile_factor: cutlass.Constexpr,
):
    """SM100 entry point for sparse block iteration.

    SM100 uses PipelineTmaUmma which doesn't support extra_tx_count, so we use
    simplified block processing that just calls producer_acquire without extras.

    Q is loaded together with the first non-empty list (masked first, full
    otherwise). `q_producer_phase` is flipped only when a Q load was actually
    issued, so fully-empty tiles leave the Q phase untouched.

    Args:
        m_block: which tile of m we are processing
        qhead_per_kvhead: Constexpr pack factor

    Returns:
        (kv_producer_state, q_producer_phase) after processing the tile.
    """
    m_block_sparse = sparse_tensor_m_block(m_block, qhead_per_kvhead, q_subtile_factor)
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = blocksparse_tensors
    curr_mask_block_cnt = mask_block_cnt[batch_idx, head_idx, m_block_sparse]
    curr_mask_block_idx = mask_block_idx[batch_idx, head_idx, m_block_sparse, None]
    if const_expr(full_block_cnt is not None):
        curr_full_block_cnt = full_block_cnt[batch_idx, head_idx, m_block_sparse]
        curr_full_block_idx = full_block_idx[batch_idx, head_idx, m_block_sparse, None]
    else:
        # This sparse layout carries no separate full-block list; treat it as empty.
        curr_full_block_cnt = Int32(0)
        curr_full_block_idx = None
    mask_empty = curr_mask_block_cnt == 0
    full_empty = curr_full_block_cnt == 0
    q_phase_flipped = False
    if mask_empty:
        # No masked blocks: process full list with Q loading
        kv_producer_state = load_block_list_sm100(
            curr_full_block_idx,
            curr_full_block_cnt,
            load_q_with_first=True,
            q_stage=q_stage,
            kv_producer_state=kv_producer_state,
            load_Q=load_Q,
            load_K=load_K,
            load_V=load_V,
            pipeline_kv=pipeline_kv,
        )
        # Q was only loaded if the full list was non-empty
        # (load_block_list_sm100 is a no-op when block_count == 0).
        q_phase_flipped = not full_empty
    else:
        # Process masked blocks with Q loading
        kv_producer_state = load_block_list_sm100(
            curr_mask_block_idx,
            curr_mask_block_cnt,
            load_q_with_first=True,
            q_stage=q_stage,
            kv_producer_state=kv_producer_state,
            load_Q=load_Q,
            load_K=load_K,
            load_V=load_V,
            pipeline_kv=pipeline_kv,
        )
        q_phase_flipped = True
        if not full_empty:
            # Process full blocks without Q loading
            kv_producer_state = load_block_list_sm100(
                curr_full_block_idx,
                curr_full_block_cnt,
                load_q_with_first=False,
                q_stage=q_stage,
                kv_producer_state=kv_producer_state,
                load_Q=load_Q,
                load_K=load_K,
                load_V=load_V,
                pipeline_kv=pipeline_kv,
            )
    if q_phase_flipped:
        q_producer_phase ^= 1
    return kv_producer_state, q_producer_phase
@cute.jit
def get_total_block_count(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    m_block,
    qhead_per_kvhead: cutlass.Constexpr,
    q_subtile_factor: cutlass.Constexpr,
):
    """Total (masked + full) KV block count for one (batch, head, m_block) tile.

    When the sparse layout carries no separate full-block list (its count
    tensor is None), only the masked count contributes.
    """
    sparse_m = sparse_tensor_m_block(m_block, qhead_per_kvhead, q_subtile_factor)
    mask_cnt, _mask_idx, full_cnt, _full_idx = blocksparse_tensors
    total = mask_cnt[batch_idx, head_idx, sparse_m]
    if const_expr(full_cnt is not None):
        total = total + full_cnt[batch_idx, head_idx, sparse_m]
    return total
@cute.jit
def handle_block_sparse_empty_tile_correction_sm100(
    tidx: Int32,
    q_stage: cutlass.Constexpr,
    m_block_size: cutlass.Constexpr,
    qhead_per_kvhead,
    pack_gqa: cutlass.Constexpr,
    is_split_kv: cutlass.Constexpr,
    learnable_sink,
    mLSE,
    seqlen,
    m_block: Int32,
    head_idx: Int32,
    batch_idx: Int32,
    split_idx: Int32,
    sScale: cute.Tensor,
    stats: list,
    correction_epilogue: Callable,
    thr_mma_pv: cute.core.ThrMma,
    tOtO: cute.Tensor,
    sO: cute.Tensor,
    pipeline_sm_stats: cutlass.pipeline.PipelineAsync,
    sm_stats_barrier: cutlass.pipeline.NamedBarrier,
    pipeline_o_epi: cutlass.pipeline.PipelineAsync,
    sm_stats_consumer_phase: Int32,
    o_corr_consumer_phase: Int32,
    corr_epi_producer_phase: Int32,
    softmax_scale_log2: Float32,
    mO_cur: Optional[cute.Tensor] = None,
    gO: Optional[cute.Tensor] = None,
    gmem_tiled_copy_O: Optional[cute.TiledCopy] = None,
):
    """Handle SM100 forward block-sparse tiles with no active KV blocks.
    This path is taken when `total_block_cnt == 0`. The softmax warp-group still
    arrives `mbar_softmax_corr_full` (synthetic "no work") so the correction
    warp-group can:
    - seed fully-masked-row stats (row_sum=1; row_max=-inf when tracked) for LSE
    - run `correction_epilogue` with `scale=0` so the output tile is written as zeros
      (independent of any prior tmem contents)
    - wait on `mbar_softmax_corr_full` and arrive `mbar_softmax_corr_empty`
      (and `mbar_corr_epi_*` when applicable) so phases stay aligned across tiles
    This helper intentionally does not touch `mbar_P_full_*` since no P is produced.
    See NOTE [SM100 block-sparse empty tiles: mbarrier contract].

    Returns:
        (sm_stats_consumer_phase, o_corr_consumer_phase, corr_epi_producer_phase)
        with the first and last flipped once per q_stage iteration;
        o_corr_consumer_phase is returned unchanged.
    """
    # log2(e), used below to move the natural-log-domain sink value into log2 domain.
    LOG2_E = Float32(math.log2(math.e))
    # Warp index within the 4-warp group; selects this warp's slot in sm_stats_barrier.
    warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
    for stage in cutlass.range_constexpr(q_stage):
        # Seed "no KV contribution" row stats: sum of 1 keeps LSE finite;
        # row_max is tracked only when LSE output or a learnable sink needs it.
        row_sum_value = Float32(1.0)
        row_max_value = (
            -Float32.inf if const_expr(mLSE is not None or learnable_sink is not None) else None
        )
        if const_expr(learnable_sink is not None):
            sink_val = -Float32.inf
            if const_expr(not pack_gqa):
                sink_val = Float32(learnable_sink[head_idx])
            elif tidx < m_block_size:
                # Pack-GQA: recover the per-Q-head index from the packed row.
                q_head_idx = (
                    (q_stage * m_block + stage) * m_block_size + tidx
                ) % qhead_per_kvhead + head_idx * qhead_per_kvhead
                sink_val = Float32(learnable_sink[q_head_idx])
            # Fold the sink into the seeded stats (only once across splits).
            if sink_val != -Float32.inf and (const_expr(not is_split_kv) or split_idx == 0):
                if row_max_value == -Float32.inf:
                    row_max_value = sink_val * (LOG2_E / softmax_scale_log2)
                    row_sum_value = Float32(1.0)
                else:
                    row_sum_value = row_sum_value + cute.math.exp2(
                        sink_val * LOG2_E - row_max_value * softmax_scale_log2, fastmath=True
                    )
        if tidx < m_block_size:
            # Publish per-row scale (and row_max when tracked) via shared memory.
            scale_row_idx = tidx + stage * m_block_size
            sScale[scale_row_idx] = row_sum_value
            if const_expr(mLSE is not None or learnable_sink is not None):
                sScale[scale_row_idx + q_stage * m_block_size] = row_max_value
        # acc_flag: row_sum is zero or NaN (x != x is the NaN test).
        acc_flag = row_sum_value == Float32(0.0) or row_sum_value != row_sum_value
        stats[stage] = (row_sum_value, row_max_value, acc_flag)
        # See NOTE [SM100 block-sparse empty tiles: mbarrier contract].
        # pipeline_sm_stats.consumer_wait_w_index_phase(stage, sm_stats_consumer_phase)
        sm_stats_barrier.arrive_and_wait_w_index(index=stage * 4 + warp_idx)
        pipeline_sm_stats.consumer_release_w_index(stage)
        if const_expr(gmem_tiled_copy_O is None):
            pipeline_o_epi.producer_acquire_w_index_phase(stage, corr_epi_producer_phase)
        correction_epilogue(
            thr_mma_pv,
            tOtO[None, None, None, stage],
            tidx,
            stage,
            m_block,
            seqlen.seqlen_q,
            Float32(0.0),  # zero scale ensures empty tile writes zeros into staged outputs
            sO[None, None, stage],
            mO_cur,
            gO[None, None, stage],
            gmem_tiled_copy_O,
        )
        if const_expr(gmem_tiled_copy_O is None):
            pipeline_o_epi.producer_commit_w_index(stage)
        sm_stats_consumer_phase ^= 1
        corr_epi_producer_phase ^= 1
    return (
        sm_stats_consumer_phase,
        o_corr_consumer_phase,
        corr_epi_producer_phase,
    )
@cute.jit
def softmax_block_sparse_sm100(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    m_block,
    softmax_step: Callable,
    mask_fn: Callable,
    mask_fn_none: Callable,
    mma_si_consumer_phase: Int32,
    si_corr_producer_phase: Int32,
    s0_s1_sequence_phase: Int32,
    pipeline_sm_stats: cutlass.pipeline.PipelineAsync,
    sm_stats_barrier: cutlass.pipeline.NamedBarrier,
    q_stage: cutlass.Constexpr,
    stage_idx: Int32,
    check_m_boundary: bool,
    qhead_per_kvhead: cutlass.Constexpr,
    q_subtile_factor: cutlass.Constexpr[int] = 1,
):
    """Run the SM100 softmax warp over the sparse KV block lists of one tile.

    Visits the masked (partial) list first, then the full list, each in
    reverse index order. Masked blocks use `mask_fn`; full blocks use
    `mask_fn_none`. The tile's first block runs `softmax_step` with
    `is_first=True` and sequence-length masking.

    Empty tiles (no blocks at all) skip `softmax_step` entirely and only make
    a synthetic arrival on `sm_stats_barrier` so the correction warp-group can
    proceed — see NOTE [SM100 block-sparse empty tiles: mbarrier contract].

    Returns:
        (mma_si_consumer_phase, si_corr_producer_phase, s0_s1_sequence_phase,
        is_empty_tile) where is_empty_tile is `total_block_cnt == 0`.
    """
    # Warp index within the 4-warp group; selects this warp's slot in sm_stats_barrier.
    warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
    m_block_sparse = sparse_tensor_m_block(m_block, qhead_per_kvhead, q_subtile_factor)
    mask_block_cnt, mask_block_idx, full_block_cnt, full_block_idx = blocksparse_tensors
    curr_mask_block_cnt = mask_block_cnt[batch_idx, head_idx, m_block_sparse]
    curr_mask_block_idx = mask_block_idx[batch_idx, head_idx, m_block_sparse, None]
    if const_expr(full_block_cnt is not None):
        curr_full_block_cnt = full_block_cnt[batch_idx, head_idx, m_block_sparse]
        curr_full_block_idx = full_block_idx[batch_idx, head_idx, m_block_sparse, None]
    else:
        # This sparse layout carries no separate full-block list; treat it as empty.
        curr_full_block_cnt = Int32(0)
        curr_full_block_idx = None
    total_block_cnt = curr_mask_block_cnt + curr_full_block_cnt
    if total_block_cnt == 0:
        # See NOTE [SM100 block-sparse empty tiles: mbarrier contract].
        # pipeline_sm_stats.producer_commit_w_index(stage_idx)
        sm_stats_barrier.arrive_w_index(index=stage_idx * 4 + warp_idx)
    else:
        if curr_mask_block_cnt > 0:
            # First masked block: seqlen masking + is_first bookkeeping.
            mask_n_block = curr_mask_block_idx[curr_mask_block_cnt - 1]
            (
                mma_si_consumer_phase,
                si_corr_producer_phase,
                s0_s1_sequence_phase,
            ) = softmax_step(
                mma_si_consumer_phase,
                si_corr_producer_phase,
                s0_s1_sequence_phase,
                mask_n_block,
                is_first=True,
                mask_fn=partial(mask_fn, mask_seqlen=True, check_q_boundary=check_m_boundary),
            )
            for i in cutlass.range(1, curr_mask_block_cnt):
                mask_n_block = curr_mask_block_idx[curr_mask_block_cnt - 1 - i]
                (
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                ) = softmax_step(
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                    mask_n_block,
                    mask_fn=partial(mask_fn, mask_seqlen=False, check_q_boundary=check_m_boundary),
                )
        if curr_full_block_cnt > 0:
            full_n_block = curr_full_block_idx[curr_full_block_cnt - 1]
            if curr_mask_block_cnt == 0:
                # Full list is first for this tile: it owns is_first + seqlen masking.
                (
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                ) = softmax_step(
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                    full_n_block,
                    is_first=True,
                    mask_fn=partial(
                        mask_fn_none, mask_seqlen=True, check_q_boundary=check_m_boundary
                    ),
                )
            else:
                (
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                ) = softmax_step(
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                    full_n_block,
                    is_first=False,
                    mask_fn=partial(
                        mask_fn_none, mask_seqlen=False, check_q_boundary=check_m_boundary
                    ),
                )
            for i in cutlass.range(1, curr_full_block_cnt):
                full_n_block = curr_full_block_idx[curr_full_block_cnt - 1 - i]
                (
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                ) = softmax_step(
                    mma_si_consumer_phase,
                    si_corr_producer_phase,
                    s0_s1_sequence_phase,
                    full_n_block,
                    mask_fn=partial(
                        mask_fn_none, mask_seqlen=False, check_q_boundary=check_m_boundary
                    ),
                )
    return (
        mma_si_consumer_phase,
        si_corr_producer_phase,
        s0_s1_sequence_phase,
        total_block_cnt == 0,
    )
# =============================================================================
# Backward-specific block-sparse helpers (SM100)
# =============================================================================
#
# In backward, iteration is transposed compared to forward:
# - Forward: outer loop over m_blocks (Q tiles), inner loop over n_blocks (KV tiles)
# - Backward: outer loop over n_blocks (KV tiles), inner loop over m_blocks (Q tiles)
#
# The backward block-sparse tensors use "Q direction" indexing:
# - q_block_cnt[batch, head, n_block] → count of m_blocks to process for this KV tile
# - q_block_idx[batch, head, n_block, :] → indices of m_blocks to process
#
@cute.jit
def get_total_q_block_count_bwd(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    n_block,
    subtile_factor: cutlass.Constexpr = 1,
    m_block_max: int = 0,
):
    """Total tile iterations (including subtiles) for one KV tile in backward.

    Sums the masked and — when the layout carries one — full Q-block counts
    for this (batch, head, n_block) entry, scaled by the Q subtiling factor.
    `m_block_max` is accepted for signature parity but not used here.
    """
    cnt_partial, _idx_partial, cnt_full, _idx_full = blocksparse_tensors
    block_total = cnt_partial[batch_idx, head_idx, n_block]
    if const_expr(cnt_full is not None):
        block_total = block_total + cnt_full[batch_idx, head_idx, n_block]
    return block_total * subtile_factor
@cute.jit
def produce_block_sparse_q_loads_bwd_sm100(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    n_block,
    # Pipeline states (will be returned after advancing)
    producer_state_Q_LSE,
    producer_state_dO_dPsum,
    # Pipelines
    pipeline_Q,
    pipeline_LSE,
    pipeline_dO,
    pipeline_dPsum,
    # Load functions
    load_K,
    load_V,
    load_Q,
    load_dO,
    copy_stats,
    # Global tensors for LSE/dPsum
    gLSE,
    sLSE,
    gdPsum,
    sdPsum,
    # TMA copy bytes for extra_tx_count
    tma_copy_bytes_K,
    tma_copy_bytes_V,
    # Flags for which loads to perform
    should_load_Q: cutlass.Constexpr,
    should_load_dO: cutlass.Constexpr,
    # Subtiling factor and bounds
    subtile_factor: cutlass.Constexpr = 1,
    m_block_max: int = 0,
):
    """SM100 backward block sparse loading with subtiling.

    Returns updated (producer_state_Q_LSE, producer_state_dO_dPsum).
    First iteration loads K/V alongside Q/dO (K rides the Q stage barrier, V
    rides the dO stage barrier via extra_tx_count); subsequent iterations load
    only Q/dO. LSE and dPsum rows are copied alongside their Q/dO stages.

    NOTE(review): when m_block_max > 0, an out-of-range m_block is clamped to
    m_block_max - 1 (re-loading the last valid block) rather than skipped —
    unlike the SM90 path, which skips via `m_block < m_block_max`. Confirm the
    consumer discards the clamped iterations.
    """
    (
        curr_q_cnt,
        curr_q_idx,
        curr_full_cnt,
        curr_full_idx,
        loop_count,
    ) = get_block_sparse_iteration_info_bwd(
        blocksparse_tensors, batch_idx, head_idx, n_block, subtile_factor, m_block_max
    )
    for iter_idx in cutlass.range(loop_count, unroll=1):
        # Masked list first, then full list; each sparse block expands into
        # subtile_factor consecutive m_blocks.
        m_block, _ = get_m_block_from_iter_bwd(
            iter_idx,
            curr_q_cnt,
            curr_q_idx,
            curr_full_cnt,
            curr_full_idx,
            subtile_factor,
            m_block_max,
        )
        m_block_safe = m_block
        if m_block_max > 0:
            m_block_safe = cutlass.min(m_block, m_block_max - 1)
        if iter_idx == 0:
            # First block: load K/V alongside Q/dO
            if const_expr(should_load_Q):
                # K shares the Q stage's mbarrier: expect tma_copy_bytes_K extra bytes.
                pipeline_Q.producer_acquire(producer_state_Q_LSE, extra_tx_count=tma_copy_bytes_K)
                load_K(tma_bar_ptr=pipeline_Q.producer_get_barrier(producer_state_Q_LSE))
                load_Q(m_block_safe, producer_state=producer_state_Q_LSE)
                pipeline_Q.producer_commit(producer_state_Q_LSE)
                pipeline_LSE.producer_acquire(producer_state_Q_LSE)
                with cute.arch.elect_one():
                    copy_stats(
                        gLSE[None, m_block_safe],
                        sLSE[None, producer_state_Q_LSE.index],
                        mbar_ptr=pipeline_LSE.producer_get_barrier(producer_state_Q_LSE),
                    )
                producer_state_Q_LSE.advance()
            if const_expr(should_load_dO):
                # V shares the dO stage's mbarrier: expect tma_copy_bytes_V extra bytes.
                pipeline_dO.producer_acquire(
                    producer_state_dO_dPsum, extra_tx_count=tma_copy_bytes_V
                )
                load_V(tma_bar_ptr=pipeline_dO.producer_get_barrier(producer_state_dO_dPsum))
                load_dO(m_block_safe, producer_state=producer_state_dO_dPsum)
                pipeline_dO.producer_commit(producer_state_dO_dPsum)
                pipeline_dPsum.producer_acquire(producer_state_dO_dPsum)
                with cute.arch.elect_one():
                    copy_stats(
                        gdPsum[None, m_block_safe],
                        sdPsum[None, producer_state_dO_dPsum.index],
                        mbar_ptr=pipeline_dPsum.producer_get_barrier(producer_state_dO_dPsum),
                    )
                producer_state_dO_dPsum.advance()
        else:
            # Subsequent blocks: just load Q/dO (K/V already loaded)
            if const_expr(should_load_Q):
                pipeline_Q.producer_acquire(producer_state_Q_LSE)
                load_Q(m_block_safe, producer_state=producer_state_Q_LSE)
                pipeline_Q.producer_commit(producer_state_Q_LSE)
                pipeline_LSE.producer_acquire(producer_state_Q_LSE)
                with cute.arch.elect_one():
                    copy_stats(
                        gLSE[None, m_block_safe],
                        sLSE[None, producer_state_Q_LSE.index],
                        mbar_ptr=pipeline_LSE.producer_get_barrier(producer_state_Q_LSE),
                    )
                producer_state_Q_LSE.advance()
            if const_expr(should_load_dO):
                pipeline_dO.producer_acquire(producer_state_dO_dPsum)
                load_dO(m_block_safe, producer_state=producer_state_dO_dPsum)
                pipeline_dO.producer_commit(producer_state_dO_dPsum)
                pipeline_dPsum.producer_acquire(producer_state_dO_dPsum)
                with cute.arch.elect_one():
                    copy_stats(
                        gdPsum[None, m_block_safe],
                        sdPsum[None, producer_state_dO_dPsum.index],
                        mbar_ptr=pipeline_dPsum.producer_get_barrier(producer_state_dO_dPsum),
                    )
                producer_state_dO_dPsum.advance()
    return producer_state_Q_LSE, producer_state_dO_dPsum
@cute.jit
def get_block_sparse_iteration_info_bwd(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    n_block,
    subtile_factor: cutlass.Constexpr = 1,
    m_block_max: int = 0,
):
    """Gather per-KV-tile block-sparse iteration state for the backward pass.

    Returns:
        (curr_q_cnt, curr_q_idx, curr_full_cnt, curr_full_idx, total_count)
        where total_count already includes the subtiling factor. When the
        layout has no full-block list, curr_full_cnt is 0 and curr_full_idx
        is None.
    """
    q_cnt_t, q_idx_t, full_cnt_t, full_idx_t = blocksparse_tensors
    curr_q_cnt = q_cnt_t[batch_idx, head_idx, n_block]
    curr_q_idx = q_idx_t[batch_idx, head_idx, n_block, None]
    curr_full_cnt = Int32(0)
    curr_full_idx = None
    sparse_total = curr_q_cnt
    if const_expr(full_cnt_t is not None):
        curr_full_cnt = full_cnt_t[batch_idx, head_idx, n_block]
        curr_full_idx = full_idx_t[batch_idx, head_idx, n_block, None]
        sparse_total = sparse_total + curr_full_cnt
    return curr_q_cnt, curr_q_idx, curr_full_cnt, curr_full_idx, sparse_total * subtile_factor
@cute.jit
def get_m_block_from_iter_bwd(
    iter_idx,
    curr_q_cnt,
    curr_q_idx: cute.Tensor,
    curr_full_cnt,
    curr_full_idx: Optional[cute.Tensor],
    subtile_factor: cutlass.Constexpr = 1,
    m_block_max: int = 0,
):
    """Translate a flat backward iteration index into a Q-tile block index.

    Iteration walks the masked (partial) list first, then the full list, with
    each sparse block expanded into `subtile_factor` consecutive sub-blocks.

    Returns:
        (m_block, is_full_block) — the Q-tile index to process, and whether it
        came from the full-block list (so no mask_mod is needed there).
    """
    list_pos = iter_idx // subtile_factor
    sub_off = iter_idx % subtile_factor
    base_block = Int32(0)
    from_full_list = False
    if const_expr(curr_full_idx is not None):
        if list_pos < curr_q_cnt:
            base_block = curr_q_idx[list_pos]
        else:
            base_block = curr_full_idx[list_pos - curr_q_cnt]
            from_full_list = True
    else:
        base_block = curr_q_idx[list_pos]
    return base_block * subtile_factor + sub_off, from_full_list
@cute.jit
def _load_q_do_block_sm90(
    m_block,
    producer_state_Q,
    producer_state_dO,
    pipeline_Q,
    pipeline_dO,
    load_K,
    load_V,
    load_Q,
    load_dO,
    load_LSE,
    load_dPsum,
    tma_copy_bytes_K,
    tma_copy_bytes_V,
    Q_stage_eq_dO_stage: cutlass.Constexpr,
    load_kv: bool,
):
    """Load one Q/dO block, optionally loading K/V on first iteration.

    When `load_kv` is set, K (resp. V) is piggybacked on the Q (resp. dO)
    stage's barrier via `extra_tx_count`, so one mbarrier tracks both copies.
    When `Q_stage_eq_dO_stage`, the dO-side loads reuse the Q producer state;
    both states are still advanced at the end either way.

    Returns:
        Updated (producer_state_Q, producer_state_dO).
    """
    if load_kv:
        # Expect tma_copy_bytes_K extra bytes so K completes on the Q barrier.
        pipeline_Q.producer_acquire(producer_state_Q, extra_tx_count=tma_copy_bytes_K)
        load_K(tma_bar_ptr=pipeline_Q.producer_get_barrier(producer_state_Q))
    else:
        pipeline_Q.producer_acquire(producer_state_Q)
    load_Q(m_block, producer_state=producer_state_Q)
    load_LSE(m_block, producer_state=producer_state_Q)
    producer_state_dO_cur = (
        producer_state_dO if const_expr(not Q_stage_eq_dO_stage) else producer_state_Q
    )
    if load_kv:
        # Expect tma_copy_bytes_V extra bytes so V completes on the dO barrier.
        pipeline_dO.producer_acquire(producer_state_dO_cur, extra_tx_count=tma_copy_bytes_V)
        load_V(tma_bar_ptr=pipeline_dO.producer_get_barrier(producer_state_dO_cur))
    else:
        pipeline_dO.producer_acquire(producer_state_dO_cur)
    load_dO(m_block, producer_state=producer_state_dO_cur)
    load_dPsum(m_block, producer_state=producer_state_dO_cur)
    producer_state_Q.advance()
    producer_state_dO.advance()
    return producer_state_Q, producer_state_dO
@cute.jit
def produce_block_sparse_q_loads_bwd_sm90(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    n_block,
    producer_state_Q,
    producer_state_dO,
    pipeline_Q,
    pipeline_dO,
    load_K,
    load_V,
    load_Q,
    load_dO,
    load_LSE,
    load_dPsum,
    tma_copy_bytes_K,
    tma_copy_bytes_V,
    Q_stage_eq_dO_stage: cutlass.Constexpr,
    subtile_factor: cutlass.Constexpr,
    m_block_max: int,
):
    """SM90 backward block sparse loading with separate partial/full loops.
    K/V are loaded with the first valid block. Iterates partial blocks first,
    then full blocks, matching consumer order.
    Returns updated (producer_state_Q, producer_state_dO).
    """
    # Per-(batch, head, n_block) counts and index rows; full_* entries are
    # None at compile time when no fully-unmasked block lists were provided.
    q_cnt, q_idx, full_cnt, full_idx = blocksparse_tensors
    curr_q_cnt = q_cnt[batch_idx, head_idx, n_block]
    curr_q_idx = q_idx[batch_idx, head_idx, n_block, None]
    if const_expr(full_cnt is not None):
        curr_full_cnt = full_cnt[batch_idx, head_idx, n_block]
        curr_full_idx = full_idx[batch_idx, head_idx, n_block, None]
    else:
        curr_full_cnt = Int32(0)
        curr_full_idx = None
    kv_loaded = False
    # Partial (masked) blocks first. Each sparse entry expands into
    # `subtile_factor` consecutive kernel m-tiles.
    for iter_idx in cutlass.range(curr_q_cnt * subtile_factor, unroll=1):
        sparse_idx = iter_idx // subtile_factor
        subtile_offset = iter_idx % subtile_factor
        m_block = curr_q_idx[sparse_idx] * subtile_factor + subtile_offset
        # Guard against subtiles that fall past the end of the sequence.
        if m_block < m_block_max:
            producer_state_Q, producer_state_dO = _load_q_do_block_sm90(
                m_block,
                producer_state_Q,
                producer_state_dO,
                pipeline_Q,
                pipeline_dO,
                load_K,
                load_V,
                load_Q,
                load_dO,
                load_LSE,
                load_dPsum,
                tma_copy_bytes_K,
                tma_copy_bytes_V,
                Q_stage_eq_dO_stage,
                # K/V are issued exactly once, with the first valid block.
                load_kv=not kv_loaded,
            )
            kv_loaded = True
    # Then full (unmasked) blocks, in the same order the consumer processes
    # them so pipeline stages stay matched.
    if const_expr(full_cnt is not None):
        for iter_idx in cutlass.range(curr_full_cnt * subtile_factor, unroll=1):
            sparse_idx = iter_idx // subtile_factor
            subtile_offset = iter_idx % subtile_factor
            m_block = curr_full_idx[sparse_idx] * subtile_factor + subtile_offset
            if m_block < m_block_max:
                producer_state_Q, producer_state_dO = _load_q_do_block_sm90(
                    m_block,
                    producer_state_Q,
                    producer_state_dO,
                    pipeline_Q,
                    pipeline_dO,
                    load_K,
                    load_V,
                    load_Q,
                    load_dO,
                    load_LSE,
                    load_dPsum,
                    tma_copy_bytes_K,
                    tma_copy_bytes_V,
                    Q_stage_eq_dO_stage,
                    load_kv=not kv_loaded,
                )
                kv_loaded = True
    return producer_state_Q, producer_state_dO
@cute.jit
def consume_block_sparse_mma_bwd_sm90(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    n_block,
    consumer_state_Q,
    consumer_state_dO,
    mma_one_m_block_fn,
    mask,
    mask_mod,
    is_causal: cutlass.Constexpr,
    is_local: cutlass.Constexpr,
    thr_mma_SdP,
    score_mod_fn=None,
    score_mod_bwd_fn=None,
    subtile_factor: cutlass.Constexpr = 1,
    m_block_max: int = 0,
    aux_tensors=None,
    fastdiv_mods=(None, None),
):
    """SM90 backward block sparse MMA consumption with separate partial/full loops.
    Partial blocks are processed first (with mask_mod applied), then full blocks
    (without mask_mod). This ensures mask_mod is only applied where needed.
    Returns updated (consumer_state_Q, consumer_state_dO).
    """
    # Per-(batch, head, n_block) counts and index rows; full_* are compile-time
    # None when no fully-unmasked block lists exist.
    q_cnt, q_idx, full_cnt, full_idx = blocksparse_tensors
    curr_q_cnt = q_cnt[batch_idx, head_idx, n_block]
    curr_q_idx = q_idx[batch_idx, head_idx, n_block, None]
    if const_expr(full_cnt is not None):
        curr_full_cnt = full_cnt[batch_idx, head_idx, n_block]
        curr_full_idx = full_idx[batch_idx, head_idx, n_block, None]
    else:
        curr_full_cnt = Int32(0)
        curr_full_idx = None
    # First processed block initializes the dK/dV accumulators; subsequent
    # blocks accumulate on top.
    dKV_accumulate = False
    # Partial blocks need the user mask_mod in addition to seqlen/causal/local
    # masking; full blocks skip mask_mod (the only difference between the two).
    mask_fn_partial = partial(
        mask.apply_mask,
        batch_idx=batch_idx,
        head_idx=head_idx,
        n_block=n_block,
        thr_mma=thr_mma_SdP,
        mask_seqlen=True,
        mask_causal=is_causal,
        mask_local=is_local,
        mask_mod=mask_mod,
        aux_tensors=aux_tensors,
        fastdiv_mods=fastdiv_mods,
    )
    mask_fn_full = partial(
        mask.apply_mask,
        batch_idx=batch_idx,
        head_idx=head_idx,
        n_block=n_block,
        thr_mma=thr_mma_SdP,
        mask_seqlen=True,
        mask_causal=is_causal,
        mask_local=is_local,
        aux_tensors=aux_tensors,
        fastdiv_mods=fastdiv_mods,
    )
    # Partial blocks first — must match the producer's issue order so pipeline
    # stages line up. Each sparse entry covers `subtile_factor` m-tiles.
    for iter_idx in cutlass.range(curr_q_cnt * subtile_factor, unroll=1):
        sparse_idx = iter_idx // subtile_factor
        subtile_offset = iter_idx % subtile_factor
        m_block = curr_q_idx[sparse_idx] * subtile_factor + subtile_offset
        # Skip subtiles past the end of the sequence.
        if m_block < m_block_max:
            consumer_state_Q, consumer_state_dO = mma_one_m_block_fn(
                m_block,
                consumer_state_Q,
                consumer_state_dO,
                mask_fn=mask_fn_partial,
                score_mod_fn=score_mod_fn,
                score_mod_bwd_fn=score_mod_bwd_fn,
                dKV_accumulate=dKV_accumulate,
            )
            dKV_accumulate = True
    # Then full blocks, without mask_mod.
    if const_expr(full_cnt is not None):
        for iter_idx in cutlass.range(curr_full_cnt * subtile_factor, unroll=1):
            sparse_idx = iter_idx // subtile_factor
            subtile_offset = iter_idx % subtile_factor
            m_block = curr_full_idx[sparse_idx] * subtile_factor + subtile_offset
            if m_block < m_block_max:
                consumer_state_Q, consumer_state_dO = mma_one_m_block_fn(
                    m_block,
                    consumer_state_Q,
                    consumer_state_dO,
                    mask_fn=mask_fn_full,
                    score_mod_fn=score_mod_fn,
                    score_mod_bwd_fn=score_mod_bwd_fn,
                    dKV_accumulate=dKV_accumulate,
                )
                dKV_accumulate = True
    return consumer_state_Q, consumer_state_dO
@cute.jit
def _store_one_dQaccum_sm90(
    m_block,
    sdQaccum: cute.Tensor,
    gdQaccum: cute.Tensor,
    num_mma_warp_groups: cutlass.Constexpr,
    num_threads_per_warp_group: cutlass.Constexpr,
    tma_copy_bytes_dQ,
):
    """Store dQaccum for a single m_block.

    For each MMA warp group: first wait until the previously issued bulk
    copies reading that group's smem slice have completed, then signal the
    group's "dQ empty" barrier so it can refill smem. Afterwards, wait on each
    group's "dQ full" barrier and have one elected thread issue an async bulk
    add-reduction of the smem slice into global dQaccum.
    """
    for warp_group_idx in cutlass.range_constexpr(num_mma_warp_groups):
        # Wait until at most (num_mma_warp_groups - 1 - idx) outstanding bulk
        # groups remain, i.e. the copy that read this slice is done reading.
        cute.arch.cp_async_bulk_wait_group(num_mma_warp_groups - 1 - warp_group_idx, read=True)
        # Tell warp group `warp_group_idx` its smem dQaccum slice is free.
        cute.arch.barrier_arrive(
            barrier_id=int(NamedBarrierBwd.dQEmptyWG0) + warp_group_idx,
            number_of_threads=num_threads_per_warp_group + cute.arch.WARP_SIZE,
        )
    for warp_group_idx in cutlass.range_constexpr(num_mma_warp_groups):
        # Wait until warp group `warp_group_idx` has filled its smem slice.
        cute.arch.barrier(
            barrier_id=int(NamedBarrierBwd.dQFullWG0) + warp_group_idx,
            number_of_threads=num_threads_per_warp_group + cute.arch.WARP_SIZE,
        )
        # A single elected thread issues the bulk smem -> gmem f32 add-reduce.
        with cute.arch.elect_one():
            copy_utils.cpasync_reduce_bulk_add_f32(
                sdQaccum[None, warp_group_idx].iterator,
                gdQaccum[None, warp_group_idx, m_block].iterator,
                tma_copy_bytes_dQ,
            )
        cute.arch.cp_async_bulk_commit_group()
@cute.jit
def dQaccum_store_block_sparse_bwd_sm90(
    blocksparse_tensors: BlockSparseTensors,
    batch_idx,
    head_idx,
    n_block,
    sdQaccum: cute.Tensor,
    gdQaccum: cute.Tensor,
    subtile_factor: cutlass.Constexpr,
    m_block_max: int,
    num_mma_warp_groups: cutlass.Constexpr,
    num_threads_per_warp_group: cutlass.Constexpr,
    tma_copy_bytes_dQ,
):
    """SM90 backward block sparse dQaccum store with separate partial/full loops.
    Iterates partial blocks first, then full blocks, matching producer/consumer order.
    """
    # Per-(batch, head, n_block) counts and index rows; full_* are compile-time
    # None when no fully-unmasked block lists exist.
    q_cnt, q_idx, full_cnt, full_idx = blocksparse_tensors
    curr_q_cnt = q_cnt[batch_idx, head_idx, n_block]
    curr_q_idx = q_idx[batch_idx, head_idx, n_block, None]
    if const_expr(full_cnt is not None):
        curr_full_cnt = full_cnt[batch_idx, head_idx, n_block]
        curr_full_idx = full_idx[batch_idx, head_idx, n_block, None]
    else:
        curr_full_cnt = Int32(0)
        curr_full_idx = None
    # Partial blocks first; each sparse entry covers `subtile_factor` m-tiles.
    for iter_idx in cutlass.range(curr_q_cnt * subtile_factor, unroll=1):
        sparse_idx = iter_idx // subtile_factor
        subtile_offset = iter_idx % subtile_factor
        m_block = curr_q_idx[sparse_idx] * subtile_factor + subtile_offset
        # Skip subtiles past the end of the sequence.
        if m_block < m_block_max:
            _store_one_dQaccum_sm90(
                m_block,
                sdQaccum,
                gdQaccum,
                num_mma_warp_groups,
                num_threads_per_warp_group,
                tma_copy_bytes_dQ,
            )
    # Then full blocks, in the same order the MMA consumer processed them.
    if const_expr(full_cnt is not None):
        for iter_idx in cutlass.range(curr_full_cnt * subtile_factor, unroll=1):
            sparse_idx = iter_idx // subtile_factor
            subtile_offset = iter_idx % subtile_factor
            m_block = curr_full_idx[sparse_idx] * subtile_factor + subtile_offset
            if m_block < m_block_max:
                _store_one_dQaccum_sm90(
                    m_block,
                    sdQaccum,
                    gdQaccum,
                    num_mma_warp_groups,
                    num_threads_per_warp_group,
                    tma_copy_bytes_dQ,
                )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/block_sparse_utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/benchmark.py | # Copyright (c) 2023, Tri Dao.
"""Useful functions for writing test code."""
import torch
import torch.utils.benchmark as benchmark
def benchmark_forward(
    fn, *inputs, repeats=10, desc="", verbose=True, amp=False, amp_dtype=torch.float16, **kwinputs
):
    """Use Pytorch Benchmark on the forward pass of an arbitrary function."""
    if verbose:
        print(desc, "- Forward pass")

    # Wrap fn so the (optional) autocast context is part of the timed region.
    def wrapped(*fwd_args, **fwd_kwargs):
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            fn(*fwd_args, **fwd_kwargs)

    timer = benchmark.Timer(
        stmt="fn_amp(*inputs, **kwinputs)",
        globals={"fn_amp": wrapped, "inputs": inputs, "kwinputs": kwinputs},
        num_threads=torch.get_num_threads(),
    )
    measurement = timer.timeit(repeats)
    if verbose:
        print(measurement)
    return timer, measurement
def benchmark_backward(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Use Pytorch Benchmark on the backward pass of an arbitrary function."""
    if verbose:
        print(desc, "- Backward pass")
    # One forward pass (under optional autocast) to obtain the output graph.
    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
        out = fn(*inputs, **kwinputs)
        if type(out) is tuple:
            out = out[0]
    if grad is None:
        grad = torch.randn_like(out)
    elif grad.shape != out.shape:
        raise RuntimeError("Grad shape does not match output shape")

    def run_backward(*bwd_inputs, y, grad):
        # Clear .grad so we time backward itself, not gradient accumulation.
        for tensor in bwd_inputs:
            if isinstance(tensor, torch.Tensor):
                tensor.grad = None
        y.backward(grad, retain_graph=True)

    timer = benchmark.Timer(
        stmt="f(*inputs, y=y, grad=grad)",
        globals={"f": run_backward, "inputs": inputs, "y": out, "grad": grad},
        num_threads=torch.get_num_threads(),
    )
    measurement = timer.timeit(repeats)
    if verbose:
        print(measurement)
    return timer, measurement
def benchmark_combined(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Use Pytorch Benchmark on the forward+backward pass of an arbitrary function."""
    if verbose:
        print(desc, "- Forward + Backward pass")
    # One untimed forward pass, just to size the incoming gradient.
    with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
        out = fn(*inputs, **kwinputs)
        if type(out) is tuple:
            out = out[0]
    if grad is None:
        grad = torch.randn_like(out)
    elif grad.shape != out.shape:
        raise RuntimeError("Grad shape does not match output shape")

    def run_fwd_bwd(grad, *fb_inputs, **fb_kwinputs):
        # Clear .grad so accumulation cost is excluded from the timing.
        for tensor in fb_inputs:
            if isinstance(tensor, torch.Tensor):
                tensor.grad = None
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            y = fn(*fb_inputs, **fb_kwinputs)
            if type(y) is tuple:
                y = y[0]
        # Backward runs outside the autocast region.
        y.backward(grad, retain_graph=True)

    timer = benchmark.Timer(
        stmt="f(grad, *inputs, **kwinputs)",
        globals={"f": run_fwd_bwd, "fn": fn, "inputs": inputs, "grad": grad, "kwinputs": kwinputs},
        num_threads=torch.get_num_threads(),
    )
    measurement = timer.timeit(repeats)
    if verbose:
        print(measurement)
    return timer, measurement
def benchmark_fwd_bwd(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Use Pytorch Benchmark on the forward+backward pass of an arbitrary function."""
    # Shared keyword configuration for both sub-benchmarks.
    common = dict(repeats=repeats, desc=desc, verbose=verbose, amp=amp, amp_dtype=amp_dtype)
    fwd = benchmark_forward(fn, *inputs, **common, **kwinputs)
    bwd = benchmark_backward(fn, *inputs, grad=grad, **common, **kwinputs)
    return fwd, bwd
def benchmark_all(
    fn,
    *inputs,
    grad=None,
    repeats=10,
    desc="",
    verbose=True,
    amp=False,
    amp_dtype=torch.float16,
    **kwinputs,
):
    """Use Pytorch Benchmark on the forward+backward pass of an arbitrary function."""
    # Shared keyword configuration for the three sub-benchmarks; they run in
    # order: forward, backward, then the combined fwd+bwd pass.
    common = dict(repeats=repeats, desc=desc, verbose=verbose, amp=amp, amp_dtype=amp_dtype)
    return (
        benchmark_forward(fn, *inputs, **common, **kwinputs),
        benchmark_backward(fn, *inputs, grad=grad, **common, **kwinputs),
        benchmark_combined(fn, *inputs, grad=grad, **common, **kwinputs),
    )
def pytorch_profiler(
    fn,
    *inputs,
    trace_filename=None,
    backward=False,
    amp=False,
    amp_dtype=torch.float16,
    cpu=False,
    verbose=True,
    **kwinputs,
):
    """Wrap benchmark functions in Pytorch profiler to see CUDA information."""
    g = None
    if backward:
        # One forward pass to build a gradient of the right shape.
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            out = fn(*inputs, **kwinputs)
            if type(out) is tuple:
                out = out[0]
        g = torch.randn_like(out)

    def run_once():
        if backward:
            for x in inputs:
                if isinstance(x, torch.Tensor):
                    x.grad = None
        with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp):
            out = fn(*inputs, **kwinputs)
            if type(out) is tuple:
                out = out[0]
        # Backward should be done outside autocast
        if backward:
            out.backward(g, retain_graph=True)

    for _ in range(30):  # Warm up
        run_once()
    activities = ([torch.profiler.ProfilerActivity.CPU] if cpu else []) + [
        torch.profiler.ProfilerActivity.CUDA
    ]
    with torch.profiler.profile(
        activities=activities,
        record_shapes=True,
        # profile_memory=True,
        with_stack=True,
    ) as prof:
        run_once()
    if verbose:
        # print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=50))
        print(prof.key_averages().table(row_limit=50))
    if trace_filename is not None:
        prof.export_chrome_trace(trace_filename)
def benchmark_memory(fn, *inputs, desc="", verbose=True, **kwinputs):
    """Measure peak CUDA memory allocated by a single call to fn (reported in GB)."""
    # Start from a clean allocator state so the peak reflects only this call.
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.synchronize()
    fn(*inputs, **kwinputs)
    torch.cuda.synchronize()
    # Bytes -> "GB" using 1000 MiB per unit (matches the original reporting).
    peak_gb = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
    if verbose:
        print(f"{desc} max memory: {peak_gb}GB")
    torch.cuda.empty_cache()
    return peak_gb
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/benchmark.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/block_sparsity.py | """
Block-sparsity utilities for FlexAttention
"""
from typing import Callable, NamedTuple, Tuple
import cutlass.cute as cute
import torch
from flash_attn.cute.cute_dsl_utils import get_broadcast_dims, to_cute_tensor
def ceildiv(a: int, b: int) -> int:
    """Integer ceiling division: smallest integer >= a / b (b assumed positive)."""
    quotient, remainder = divmod(a, b)
    return quotient + (1 if remainder else 0)
class BlockSparseTensors(NamedTuple):
    """CuTe-tensor bundle describing block sparsity inside a kernel.

    Holds per-row counts and index lists of partially-masked ("mask") blocks,
    plus optional counts/indices of fully-unmasked ("full") blocks that can
    skip mask_mod evaluation. The full_* fields may be None.
    """

    # Counts of partially-masked blocks per (batch, head, row-block).
    mask_block_cnt: cute.Tensor
    # Column-block indices of those partially-masked blocks.
    mask_block_idx: cute.Tensor
    # Optional counterpart for fully-unmasked blocks (None when unused).
    full_block_cnt: cute.Tensor | None
    full_block_idx: cute.Tensor | None

    def __new_from_mlir_values__(self, values):
        # Rebuild the tuple from MLIR values (hook presumably invoked by the
        # cutlass DSL when reconstructing Python values from IR — note that
        # `self` is unused here and behaves like a class-level factory).
        # When only the two mask tensors were lowered, pad the optional
        # full-block entries with None.
        if len(values) == 2:
            values = (*values, None, None)
        return BlockSparseTensors(*values)
class BlockSparseTensorsTorch(NamedTuple):
    """Host-side (torch) block-sparsity descriptor, pre-conversion to CuTe.

    Mirrors BlockSparseTensors but carries torch tensors and the sparse block
    size used to build them. mask_* are required for block sparsity; full_*
    and block_size are optional.
    """

    # (B, H, M) int32 counts of partially-masked blocks per row-block.
    mask_block_cnt: torch.Tensor
    # (B, H, M, N) int32 column-block indices of those blocks.
    mask_block_idx: torch.Tensor
    # Optional fully-unmasked block counterpart (skip mask_mod when present).
    full_block_cnt: torch.Tensor | None = None
    full_block_idx: torch.Tensor | None = None
    # (sparse_block_size_q, sparse_block_size_kv) used to build the mask;
    # None means "use the kernel tile sizes".
    block_size: tuple[int, int] | None = None
def _expand_sparsity_tensor(
tensor: torch.Tensor,
expected_shape: Tuple[int, ...],
tensor_name: str,
context: str | None,
hint: str | Callable[[], str] | None,
) -> torch.Tensor:
"""Check if we need to expand the tensor to expected shape, and do so if possible."""
needs_expand = tensor.shape != expected_shape
if not needs_expand:
return tensor
can_expand = all(map(lambda cur, tgt: cur == tgt or cur == 1, tensor.shape, expected_shape))
if not can_expand:
context_clause = f" ({context})" if context else ""
resolved_hint = hint() if callable(hint) else hint
hint_clause = f" Hint: {resolved_hint}" if resolved_hint else ""
raise ValueError(
f"{tensor_name}{context_clause} with shape {tensor.shape} cannot be expanded to expected shape {expected_shape}."
f"{hint_clause}"
)
return tensor.expand(*expected_shape)
def _check_and_expand_block(
name: str,
cnt: torch.Tensor | None,
idx: torch.Tensor | None,
expected_count_shape: Tuple[int, int, int],
expected_index_shape: Tuple[int, int, int, int],
context: str | None,
hint: str | Callable[[], str] | None,
) -> Tuple[torch.Tensor | None, torch.Tensor | None]:
if (cnt is None) != (idx is None):
raise ValueError(
f"{name}_block_cnt and {name}_block_idx must both be provided or both be None"
)
if cnt is None or idx is None:
return None, None
if cnt.dtype != torch.int32 or idx.dtype != torch.int32:
raise ValueError(f"{name}_block tensors must have dtype torch.int32")
if cnt.device != idx.device:
raise ValueError(f"{name}_block_cnt and {name}_block_idx must be on the same device")
if not cnt.is_cuda or not idx.is_cuda:
raise ValueError(f"{name}_block tensors must live on CUDA")
expanded_cnt = _expand_sparsity_tensor(
cnt, expected_count_shape, f"{name}_block_cnt", context, hint
)
expanded_idx = _expand_sparsity_tensor(
idx, expected_index_shape, f"{name}_block_idx", context, hint
)
return expanded_cnt, expanded_idx
def get_block_sparse_expected_shapes(
    batch_size: int,
    num_head: int,
    seqlen_q: int,
    seqlen_k: int,
    m_block_size: int,
    n_block_size: int,
    q_stage: int,
) -> Tuple[Tuple[int, int, int], Tuple[int, int, int, int]]:
    """Return (expected_count_shape, expected_index_shape) for block sparse normalization."""
    # The effective Q block spans q_stage kernel m-tiles.
    effective_m = q_stage * m_block_size
    num_m_blocks = (seqlen_q + effective_m - 1) // effective_m
    num_n_blocks = (seqlen_k + n_block_size - 1) // n_block_size
    return (
        (batch_size, num_head, num_m_blocks),
        (batch_size, num_head, num_m_blocks, num_n_blocks),
    )
def infer_block_sparse_expected_shapes(
    tensors: BlockSparseTensorsTorch,
    *,
    batch_size: int,
    num_head: int,
    seqlen_q: int,
    seqlen_k: int,
    m_block_size: int,
    n_block_size: int,
    q_stage: int,
    context: str,
    sparse_block_size_q: int | None = None,
    sparse_block_size_kv: int | None = None,
) -> Tuple[Tuple[int, int, int], Tuple[int, int, int, int], int]:
    """Infer shapes and scaling for block-sparse tensors.
    Expectations:
    - mask_block_cnt is (B, H, M) and mask_block_idx is (B, H, M, N).
    - Batch/head dims may be 1 for broadcast, or match the requested sizes.
    - sparse_block_size_kv must match tile_n.
    - sparse_block_size_q must be a multiple of q_stage * tile_m.
    - If sparse_block_size_q is omitted and seqlen_q/num_m_blocks is ambiguous,
      the caller must provide block_size to disambiguate. TODO will make this required in a future PR.

    Returns (expected_count_shape, expected_index_shape, q_subtile_factor),
    where q_subtile_factor = sparse_block_size_q // (q_stage * m_block_size).
    """
    # Smallest legal block granularities given the kernel tile configuration.
    base_m_block = q_stage * m_block_size
    base_n_block = n_block_size
    if sparse_block_size_kv is None:
        sparse_block_size_kv = base_n_block
    if sparse_block_size_kv != base_n_block:
        raise ValueError(f"Block sparse tensors{context} require BLOCK_SIZE_KV={base_n_block}.")
    if tensors.mask_block_idx is None:
        raise ValueError("mask_block_cnt and mask_block_idx must be provided for block sparsity.")
    num_m_blocks = tensors.mask_block_idx.shape[2]
    if sparse_block_size_q is None:
        # Infer the Q block size from seqlen_q and the number of m-blocks.
        # min/max bracket the block sizes consistent with num_m_blocks; if they
        # disagree the inference is ambiguous and the caller must be explicit.
        min_block_size = ceildiv(seqlen_q, num_m_blocks)
        if num_m_blocks == 1:
            max_block_size = seqlen_q
        else:
            max_block_size = (seqlen_q - 1) // (num_m_blocks - 1)
        if max_block_size != min_block_size and base_m_block != 1:
            raise ValueError(
                f"Block sparse tensors{context} require explicit sparse_block_size[0] "
                f"to disambiguate block size for seqlen_q={seqlen_q} and num_m_blocks={num_m_blocks}."
            )
        sparse_block_size_q = min_block_size
    if sparse_block_size_q % base_m_block != 0:
        raise ValueError(
            f"Block sparse tensors{context} have block size {sparse_block_size_q}, "
            f"which must be a multiple of {base_m_block}."
        )
    expected_m_blocks = ceildiv(seqlen_q, sparse_block_size_q)
    expected_n_blocks = ceildiv(seqlen_k, sparse_block_size_kv)
    # One sparse Q block covers this many kernel m-tiles.
    q_subtile_factor = sparse_block_size_q // base_m_block
    expected_count_shape = (batch_size, num_head, expected_m_blocks)
    expected_index_shape = (batch_size, num_head, expected_m_blocks, expected_n_blocks)
    mask_block_cnt = tensors.mask_block_cnt
    mask_block_idx = tensors.mask_block_idx
    if mask_block_cnt is None or mask_block_idx is None:
        raise ValueError("mask_block_cnt and mask_block_idx must be provided for block sparsity.")
    if mask_block_cnt.ndim != 3 or mask_block_idx.ndim != 4:
        raise ValueError(
            f"Block sparse tensors{context} must have shapes (B, H, M) and (B, H, M, N)."
        )
    # Batch/head dims must match exactly or be 1 (broadcastable).
    for dim_name, cur, tgt in (
        ("batch", mask_block_cnt.shape[0], expected_count_shape[0]),
        ("head", mask_block_cnt.shape[1], expected_count_shape[1]),
    ):
        if cur != tgt and cur != 1:
            raise ValueError(f"Block sparse tensors{context} {dim_name} dim must be {tgt} or 1.")
    for dim_name, cur, tgt in (
        ("batch", mask_block_idx.shape[0], expected_index_shape[0]),
        ("head", mask_block_idx.shape[1], expected_index_shape[1]),
    ):
        if cur != tgt and cur != 1:
            raise ValueError(f"Block sparse tensors{context} {dim_name} dim must be {tgt} or 1.")
    if mask_block_cnt.shape[2] != mask_block_idx.shape[2]:
        raise ValueError(f"Block sparse tensors{context} must share the same m-block dimension.")
    if mask_block_idx.shape[3] != expected_n_blocks:
        raise ValueError(
            f"Block sparse tensors{context} n-block dimension must be {expected_n_blocks}."
        )
    if expected_m_blocks != num_m_blocks:
        raise ValueError(
            f"Block sparse tensors{context} m-block dimension {num_m_blocks} does not match "
            f"sparse_block_size_q={sparse_block_size_q}. "
            f"Set BlockSparseTensorsTorch.block_size to match the BlockMask BLOCK_SIZE."
        )
    return expected_count_shape, expected_index_shape, q_subtile_factor
def get_block_sparse_expected_shapes_bwd(
    batch_size: int,
    num_head: int,
    seqlen_q: int,
    seqlen_k: int,
    m_block_size: int,
    n_block_size: int,
    subtile_factor: int,
) -> Tuple[Tuple[int, int, int], Tuple[int, int, int, int]]:
    """Return (expected_count_shape, expected_index_shape) for backward block sparse normalization.
    Backward uses Q-direction indexing (transposed from forward): shapes are
    indexed by N-blocks first, then M-blocks. The effective Q block size is
    subtile_factor * m_block_size.
    """
    # Backward's sparse Q block spans `subtile_factor` kernel m-tiles.
    sparse_q = subtile_factor * m_block_size
    num_m_blocks = (seqlen_q + sparse_q - 1) // sparse_q
    num_n_blocks = (seqlen_k + n_block_size - 1) // n_block_size
    return (
        (batch_size, num_head, num_n_blocks),
        (batch_size, num_head, num_n_blocks, num_m_blocks),
    )
def normalize_block_sparse_tensors(
    tensors: BlockSparseTensorsTorch,
    *,
    expected_count_shape: Tuple[int, int, int],
    expected_index_shape: Tuple[int, int, int, int],
    context: str | None = None,
    hint: str | Callable[[], str] | None = None,
) -> BlockSparseTensorsTorch:
    """Validate and broadcast all four block-sparse tensors to their expected shapes.

    The mask pair is mandatory; the full pair is optional but, when present,
    must live on the same device as the mask tensors.
    """
    missing_msg = "mask_block_cnt and mask_block_idx must be provided for block sparsity."
    if tensors.mask_block_cnt is None or tensors.mask_block_idx is None:
        raise ValueError(missing_msg)
    mask_cnt, mask_idx = _check_and_expand_block(
        "mask",
        tensors.mask_block_cnt,
        tensors.mask_block_idx,
        expected_count_shape,
        expected_index_shape,
        context,
        hint,
    )
    if mask_cnt is None or mask_idx is None:
        raise ValueError(missing_msg)
    full_cnt, full_idx = _check_and_expand_block(
        "full",
        tensors.full_block_cnt,
        tensors.full_block_idx,
        expected_count_shape,
        expected_index_shape,
        context,
        hint,
    )
    if full_cnt is not None and mask_cnt.device != full_cnt.device:
        raise ValueError("All block sparse tensors must be on the same device")
    return BlockSparseTensorsTorch(
        mask_block_cnt=mask_cnt,
        mask_block_idx=mask_idx,
        full_block_cnt=full_cnt,
        full_block_idx=full_idx,
        block_size=tensors.block_size,
    )
def is_block_sparsity_enabled(tensors: BlockSparseTensorsTorch) -> bool:
    """True when either a full-block or a mask-block count tensor is present."""
    return tensors.full_block_cnt is not None or tensors.mask_block_cnt is not None
def get_block_sparse_broadcast_pattern(
    tensors: BlockSparseTensorsTorch,
) -> Tuple[Tuple[bool, ...], ...] | None:
    """Return broadcast pattern for block sparse tensors by checking actual strides.

    Produces a 4-tuple (one entry per tensor in declaration order) where each
    entry is a tuple of bools flagging stride-0 dims, or None for an absent
    tensor. Used in compile keys: CuTe's mark_layout_dynamic() keeps stride=0
    static, so a changed broadcast pattern must trigger recompilation.
    Tensors are expected to be already expanded/normalized.
    Returns None if block sparsity is not enabled.
    """
    if not is_block_sparsity_enabled(tensors):
        return None
    return tuple(
        get_broadcast_dims(t) if t is not None else None
        for t in (
            tensors.mask_block_cnt,
            tensors.mask_block_idx,
            tensors.full_block_cnt,
            tensors.full_block_idx,
        )
    )
def normalize_block_sparse_config(
    tensors: BlockSparseTensorsTorch,
    *,
    batch_size: int,
    num_head: int,
    seqlen_q: int,
    seqlen_k: int,
    block_size: tuple[int, int],
    q_stage: int,
) -> tuple[BlockSparseTensorsTorch, Tuple[Tuple[bool, ...], ...] | None, int]:
    """Validate/expand forward block-sparse tensors.

    Returns (normalized tensors, broadcast pattern for the compile key,
    q_subtile_factor).
    """
    tile_m, tile_n = block_size
    # Default the sparse block size to the kernel tile configuration.
    if tensors.block_size is None:
        sparse_q, sparse_kv = q_stage * tile_m, tile_n
    else:
        sparse_q, sparse_kv = tensors.block_size
    if sparse_kv != tile_n:
        raise ValueError(
            f"Block sparsity requires sparse_block_size[1]={tile_n} to match tile_n."
        )
    count_shape, index_shape, q_subtile_factor = infer_block_sparse_expected_shapes(
        tensors,
        batch_size=batch_size,
        num_head=num_head,
        seqlen_q=seqlen_q,
        seqlen_k=seqlen_k,
        m_block_size=tile_m,
        n_block_size=tile_n,
        q_stage=q_stage,
        context="forward",
        sparse_block_size_q=sparse_q,
        sparse_block_size_kv=sparse_kv,
    )
    normalized = normalize_block_sparse_tensors(
        tensors,
        expected_count_shape=count_shape,
        expected_index_shape=index_shape,
    )
    return normalized, get_block_sparse_broadcast_pattern(normalized), q_subtile_factor
def normalize_block_sparse_config_bwd(
    tensors: BlockSparseTensorsTorch,
    *,
    batch_size: int,
    num_head: int,
    seqlen_q: int,
    seqlen_k: int,
    block_size: tuple[int, int],
    subtile_factor: int,
) -> tuple[BlockSparseTensorsTorch, Tuple[Tuple[bool, ...], ...] | None]:
    """Validate/expand backward (Q-direction) block-sparse tensors.

    Returns (normalized tensors, broadcast pattern for the compile key).
    """
    tile_m, tile_n = block_size
    # Backward's Q block size is fixed by the subtile factor and the m tile.
    required_q = subtile_factor * tile_m
    if tensors.block_size is None:
        sparse_q, sparse_kv = required_q, tile_n
    else:
        sparse_q, sparse_kv = tensors.block_size
    if sparse_q != required_q:
        raise ValueError(
            f"Block sparsity expects sparse_block_size_q={required_q} "
            f"for subtile_factor={subtile_factor}."
        )
    if sparse_kv != tile_n:
        raise ValueError(
            f"Block sparsity expects sparse_block_size[1]={tile_n} to match tile_n."
        )
    count_shape, index_shape = get_block_sparse_expected_shapes_bwd(
        batch_size,
        num_head,
        seqlen_q,
        seqlen_k,
        tile_m,
        tile_n,
        subtile_factor,
    )
    normalized = normalize_block_sparse_tensors(
        tensors,
        expected_count_shape=count_shape,
        expected_index_shape=index_shape,
        context="_flash_attn_bwd",
        hint=lambda: (
            f"Backward expects Q-direction block-sparse tensors (q_mask_cnt/q_mask_idx, "
            f"and optionally full_q_cnt/full_q_idx). Regenerate the backward BlockMask with "
            f"BLOCK_SIZE=({subtile_factor * tile_m}, {tile_n})."
        ),
    )
    return normalized, get_block_sparse_broadcast_pattern(normalized)
def to_cute_block_sparse_tensors(
    tensors: BlockSparseTensorsTorch, enable_tvm_ffi: bool = True
) -> BlockSparseTensors | None:
    """Convert torch block sparsity tensors to CuTe tensors, optionally for tvm ffi"""
    if not is_block_sparsity_enabled(tensors):
        return None

    # All four tensors share the same conversion parameters.
    def convert(t):
        return to_cute_tensor(t, assumed_align=4, leading_dim=-1, enable_tvm_ffi=enable_tvm_ffi)

    # The mask pair is mandatory and converted unconditionally; the full pair
    # is optional and passed through as None when absent.
    full_cnt = tensors.full_block_cnt
    full_idx = tensors.full_block_idx
    return BlockSparseTensors(
        convert(tensors.mask_block_cnt),
        convert(tensors.mask_block_idx),
        convert(full_cnt) if full_cnt is not None else None,
        convert(full_idx) if full_idx is not None else None,
    )
def fast_sampling(mask_mod):
    """Convenience decorator to mark mask_mod as safe for 5-point fast sampling"""
    # Tag the callable in place and hand it back unchanged.
    setattr(mask_mod, "use_fast_sampling", True)
    return mask_mod
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/block_sparsity.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 391,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/barrier.py | import cutlass
import cutlass.cute as cute
from cutlass import Int32
from cutlass.cutlass_dsl import T, dsl_user_op
from cutlass._mlir.dialects import llvm
@dsl_user_op
def ld_acquire(lock_ptr: cute.Pointer, *, loc=None, ip=None) -> cutlass.Int32:
    """Acquire-load a 32-bit value from global memory at GPU scope.

    Emits `ld.global.acquire.gpu.b32`, so the load synchronizes-with a
    matching release operation (see red_release) on the same address.
    Returns the loaded value as Int32.
    """
    # Inline PTX takes the raw address as a 64-bit register operand.
    lock_ptr_i64 = lock_ptr.toint(loc=loc, ip=ip).ir_value()
    state = llvm.inline_asm(
        T.i32(),
        [lock_ptr_i64],
        "ld.global.acquire.gpu.b32 $0, [$1];",
        "=r,l",
        # Marked side-effecting so the load is not CSE'd/hoisted by LLVM.
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
    return cutlass.Int32(state)
@dsl_user_op
def red_relaxed(
    lock_ptr: cute.Pointer, val: cutlass.Constexpr[Int32], *, loc=None, ip=None
) -> None:
    """Atomically add `val` to the s32 at `lock_ptr` with relaxed ordering.

    Emits `red.relaxed.gpu.global.add.s32`: a fire-and-forget reduction
    (no return value, no ordering guarantees beyond atomicity) at GPU scope.
    """
    lock_ptr_i64 = lock_ptr.toint(loc=loc, ip=ip).ir_value()
    llvm.inline_asm(
        None,
        [lock_ptr_i64, Int32(val).ir_value(loc=loc, ip=ip)],
        "red.relaxed.gpu.global.add.s32 [$0], $1;",
        "l,r",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@dsl_user_op
def red_release(
    lock_ptr: cute.Pointer, val: cutlass.Constexpr[Int32], *, loc=None, ip=None
) -> None:
    """Atomically add `val` to the s32 at `lock_ptr` with release ordering.

    Emits `red.release.gpu.global.add.s32` at GPU scope, pairing with
    acquire loads (see ld_acquire) to publish prior writes to observers of
    the flag.
    """
    lock_ptr_i64 = lock_ptr.toint(loc=loc, ip=ip).ir_value()
    llvm.inline_asm(
        None,
        [lock_ptr_i64, Int32(val).ir_value(loc=loc, ip=ip)],
        "red.release.gpu.global.add.s32 [$0], $1;",
        "l,r",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@cute.jit
def wait_eq(lock_ptr: cute.Pointer, thread_idx: int | Int32, flag_offset: int, val: Int32) -> None:
    """Spin until the flag at lock_ptr + flag_offset equals `val`.

    Only thread 0 polls (with acquire loads); all other threads fall through
    immediately, so callers needing the whole block to wait must add their
    own barrier after this. NOTE(review): read_val starts at 0, so
    wait_eq(..., val=0) returns without performing any load — presumably
    intended (0 is treated as "already satisfied"); confirm against callers.
    """
    flag_ptr = lock_ptr + flag_offset
    if thread_idx == 0:
        read_val = Int32(0)
        while read_val != val:
            read_val = ld_acquire(flag_ptr)
@cute.jit
def arrive_inc(
    lock_ptr: cute.Pointer, thread_idx: int | Int32, flag_offset: int, val: cutlass.Constexpr[Int32]
) -> None:
    """Have thread 0 add `val` to the flag at lock_ptr + flag_offset.

    Uses a release-ordered reduction so writes made before the arrive are
    visible to a waiter that observes the updated flag via wait_eq.
    """
    flag_ptr = lock_ptr + flag_offset
    if thread_idx == 0:
        red_release(flag_ptr, val)
        # red_relaxed(flag_ptr, val)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/barrier.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Dao-AILab/flash-attention:flash_attn/cute/flash_bwd_sm100.py | # Copyright (c) 2025, Ted Zadouri, Markus Hoehnerbach, Jay Shah, Tri Dao.
import math
from typing import Callable, Optional
from functools import partial
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
from cutlass.cute import FastDivmodDivisor
from cutlass import Float32, Int32, Int64, const_expr
from cutlass.utils import LayoutEnum
from cutlass.cute.nvgpu import cpasync, tcgen05
import cutlass.utils.blackwell_helpers as sm100_utils_basic
from cutlass.pipeline import PipelineAsync
import quack.activation
from quack import layout_utils
from flash_attn.cute import utils
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
from flash_attn.cute import copy_utils
from flash_attn.cute import pipeline
from flash_attn.cute.blackwell_helpers import gemm_w_idx, gemm_ptx_w_idx # noqa
from flash_attn.cute.mask import AttentionMask
from flash_attn.cute.seqlen_info import SeqlenInfoQK
from flash_attn.cute.block_info import BlockInfo
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.tile_scheduler import (
TileSchedulerArguments,
SingleTileScheduler,
SingleTileLPTBwdScheduler, # noqa
SingleTileVarlenScheduler,
)
from flash_attn.cute import barrier
from flash_attn.cute.named_barrier import NamedBarrierBwdSm100
from flash_attn.cute.softmax import apply_score_mod_inner, apply_score_mod_bwd_inner
from flash_attn.cute.block_sparsity import BlockSparseTensors
from flash_attn.cute.block_sparse_utils import (
get_total_q_block_count_bwd,
get_block_sparse_iteration_info_bwd,
get_m_block_from_iter_bwd,
produce_block_sparse_q_loads_bwd_sm100,
)
class FlashAttentionBackwardSm100:
arch = 100
def __init__(
    self,
    head_dim: int,
    head_dim_v: Optional[int] = None,
    is_causal: bool = False,
    is_local: bool = False,
    qhead_per_kvhead: cutlass.Constexpr[int] = 1,
    tile_m: int = 128,
    tile_n: int = 128,
    is_persistent: bool = False,
    deterministic: bool = False,
    cluster_size: int = 1,
    use_2cta_instrs: bool = False,
    score_mod: cutlass.Constexpr | None = None,
    score_mod_bwd: cutlass.Constexpr | None = None,
    mask_mod: cutlass.Constexpr | None = None,
    has_aux_tensors: cutlass.Constexpr = False,
    subtile_factor: cutlass.Constexpr[int] = 1,
):
    """Configure the SM100 FlashAttention backward kernel.

    Derives the compile-time configuration from the requested problem shape:
    padded head dimensions, MMA tilers for the five backward GEMMs
    (S, dP, dV, dK, dQ), TMEM column offsets, warp-role assignments, named
    barriers, and per-role register budgets.

    Args:
        head_dim: Q/K head dimension (padded up to a multiple of 16).
        head_dim_v: V head dimension; defaults to ``head_dim``.
        is_causal: causal attention masking.
        is_local: sliding-window (local) attention.
        qhead_per_kvhead: GQA ratio (query heads per KV head).
        tile_m: CTA tile size along seqlen_q.
        tile_n: CTA tile size along seqlen_k.
        is_persistent: use a persistent tile scheduler (noted untested below).
        deterministic: serialize gradient accumulation via semaphores.
        cluster_size: CTA cluster size; only 1 or 2 supported.
        use_2cta_instrs: request 2-CTA tcgen05 MMA instructions; only honored
            when cluster_size == 2 and no local/score_mod/mask_mod variant.
        score_mod: optional forward score-modification hook.
        score_mod_bwd: optional backward score-modification hook.
        mask_mod: optional mask-modification hook.
        has_aux_tensors: whether score_mod reads auxiliary tensors.
        subtile_factor: compute-subtiling factor.
    """
    # padding head_dim to a multiple of 16 as k_block_size
    hdim_multiple_of = 16
    self.tile_hdim = int(math.ceil(head_dim / hdim_multiple_of) * hdim_multiple_of)
    head_dim_v = head_dim_v if head_dim_v is not None else head_dim
    self.same_hdim_kv = head_dim == head_dim_v
    self.tile_hdimv = int(math.ceil(head_dim_v / hdim_multiple_of) * hdim_multiple_of)
    # Out-of-bounds predication is only needed when padding is non-trivial.
    self.check_hdim_oob = head_dim != self.tile_hdim
    self.check_hdim_v_oob = head_dim_v != self.tile_hdimv
    self.tile_m = tile_m
    self.tile_n = tile_n
    assert self.tile_hdim <= 128 or (self.tile_hdim == 192 and self.tile_hdimv == 128)
    assert self.tile_hdimv <= 128
    # 2-CTA path is only supported for plain variants (no local window,
    # no score/mask modification hooks) and requires a 2-CTA cluster.
    self.use_2cta_instrs = bool(
        use_2cta_instrs
        and cluster_size == 2
        and not is_local
        and score_mod is None
        and score_mod_bwd is None
        and mask_mod is None
    )
    self.cta_group_size = 2 if self.use_2cta_instrs else 1
    assert self.tile_hdim != 192 or self.use_2cta_instrs, "Must use 2CTA for hdim 192"
    # CTA tiler
    self.cta_tiler = (tile_n, tile_m, self.tile_hdim)
    # S = K @ Q.T
    self.mma_tiler_kq = (self.cta_group_size * tile_n, tile_m, self.tile_hdim)
    # dP = V @ dO.T
    self.mma_tiler_vdo = (self.cta_group_size * tile_n, tile_m, self.tile_hdimv)
    # dV = P.T @ dO
    self.mma_tiler_pdo = (self.cta_group_size * tile_n, self.tile_hdimv, tile_m)
    # dK = dS.T @ Q
    self.mma_tiler_dsq = (self.cta_group_size * tile_n, self.tile_hdim, tile_m)
    # dQ = dS @ K
    # 2-CTA: reduction dim is cluster-wide (tile_n * cta_group_size).
    self.mma_tiler_dsk = (tile_m, self.tile_hdim, tile_n * self.cta_group_size)
    self.acc_dtype = Float32
    assert cluster_size in (1, 2), "Only cluster_size=1 or 2 is supported"
    self.cluster_shape_mn = (cluster_size, 1)
    self.is_persistent = is_persistent
    self.is_causal = is_causal
    self.is_local = is_local
    self.qhead_per_kvhead = qhead_per_kvhead
    self.pack_gqa = False
    self.deterministic = deterministic
    # Score mod and mask mod support
    self.score_mod = score_mod
    self.score_mod_bwd = score_mod_bwd
    self.mask_mod = mask_mod
    self.has_aux_tensors = has_aux_tensors
    self.subtile_factor = subtile_factor
    # For score_mod, use vec_size=1 (like forward) to handle per-element indices
    if cutlass.const_expr(has_aux_tensors):
        self.vec_size: cutlass.Constexpr = 1
    else:
        self.vec_size: cutlass.Constexpr = 4
    self.qk_acc_dtype = Float32
    # Speed optimizations, does not affect correctness
    self.shuffle_LSE = False
    self.shuffle_dPsum = False
    # Generally slower to use store dS in smem for dK, and doesn't work for 2cta
    self.use_smem_dS_for_mma_dK = False
    # Warp-role assignment within the CTA.
    self.reduce_warp_ids = (0, 1, 2, 3)
    self.compute_warp_ids = (4, 5, 6, 7, 8, 9, 10, 11)
    self.mma_warp_id = 12
    self.load_warp_id = 13
    self.relay_warp_id = 14
    self.empty_warp_id = 15
    # 16 warps -> 512 threads
    self.threads_per_cta = cute.arch.WARP_SIZE * len(
        (
            *self.reduce_warp_ids,
            *self.compute_warp_ids,
            self.mma_warp_id,
            self.load_warp_id,
            self.relay_warp_id,
            self.empty_warp_id,
        )
    )
    # NamedBarrier
    self.compute_sync_barrier = cutlass.pipeline.NamedBarrier(
        barrier_id=int(NamedBarrierBwdSm100.Compute),
        num_threads=len(self.compute_warp_ids) * cute.arch.WARP_SIZE,
    )
    self.reduce_sync_barrier = cutlass.pipeline.NamedBarrier(
        barrier_id=int(NamedBarrierBwdSm100.dQaccReduce),
        num_threads=len(self.reduce_warp_ids) * cute.arch.WARP_SIZE,
    )
    # TMEM setup
    self.tmem_alloc_cols = cute.arch.get_max_tmem_alloc_cols("sm_100")
    if self.use_2cta_instrs and self.tile_hdim == 192 and self.tile_hdimv == 128:
        assert self.tile_m == 128
        assert self.tile_n == 128
        self.tmem_dV_offset = 0
        self.tmem_dK_offset = self.tmem_dV_offset + self.tile_hdimv
        self.tmem_S_offset = self.tmem_dK_offset + self.tile_hdim
        self.tmem_P_offset = self.tmem_S_offset  # overlap with S
        self.tmem_dP_offset = 512 - self.tile_m
        self.tmem_dS_offset = self.tmem_dP_offset  # overlaps with dP
        self.tmem_dQ_offset = 512 - self.tile_hdim // 2
    else:
        self.tmem_S_offset = 0
        self.tmem_P_offset = 0  # overlap with S
        self.tmem_dV_offset = self.tmem_S_offset + self.tile_n
        self.tmem_dP_offset = self.tmem_dV_offset + self.tile_hdimv
        self.tmem_dQ_offset = (
            (self.tmem_S_offset + (self.tile_hdim // 2))
            if self.use_2cta_instrs
            else self.tmem_dP_offset
        )
        self.tmem_dK_offset = self.tmem_dP_offset + self.tile_m
        self.tmem_dS_offset = self.tmem_dP_offset  # overlap with dP
    # Per-role register budgets (empirically tuned).
    if (not is_causal and not is_local) or deterministic:
        self.num_regs_reduce = 136 if self.use_2cta_instrs else 152
        self.num_regs_compute = 136
    else:
        # Simplified from "136 if self.use_2cta_instrs else 136": both arms
        # were identical, so the budget is unconditional here.
        self.num_regs_reduce = 136
        self.num_regs_compute = 136 if self.use_2cta_instrs else 144
    # Load/MMA budgets were duplicated verbatim in both branches above; hoisted.
    self.num_regs_load = 104 if self.use_2cta_instrs else 96 - 8
    self.num_regs_mma = 104 if self.use_2cta_instrs else self.num_regs_load
    self.num_regs_empty = 24
    if const_expr(self.tile_hdim == 192):
        # Collapsed a dead if/else on is_causal/is_local: both branches
        # assigned exactly the same values.
        self.num_regs_reduce = 128 + 8
        self.num_regs_compute = 128 + 8
        self.num_regs_load = 128 - 24
        self.num_regs_mma = self.num_regs_load
    # Sanity check: the sum over warp groups must fit the 512-register budget.
    assert (
        self.num_regs_reduce
        + self.num_regs_compute * 2
        + max(self.num_regs_load, self.num_regs_mma)
        <= 512
    )
    self.buffer_align_bytes = 1024
def _setup_attributes(self):
    """Derive pipeline stage counts and TMA-reduce tiling from the config.

    Runs after ``__init__`` has fixed tile sizes and the 2-CTA mode; sets
    the smem pipeline depths (Q, dO, dQaccum, dKVaccum) and how many head
    dimension columns each TMA reduce-add covers for the dQ and dK/dV
    accumulators.
    """
    self.Q_stage = 1 if self.use_2cta_instrs else 2
    self.dO_stage = 1
    self.single_stage = 1
    # LSE_stage = Q_stage and dPsum_stage = dO_stage
    self.sdKVaccum_stage = 2
    # number of tma reduce adds per dQacc mma
    # todo: try 32/1 or 48/2 for 2cta d=192 dv=128
    if self.use_2cta_instrs and self.tile_hdim == 192:
        self.dQ_reduce_ncol_t2r = 32
        self.dQ_reduce_ncol = 24 if not self.is_causal else 32
        self.sdQaccum_stage = 2 if not self.is_causal else 1
    else:
        if self.use_2cta_instrs:
            self.dQ_reduce_ncol = 16 if self.deterministic else 8
            self.sdQaccum_stage = 2 if self.deterministic else 4
            self.dQ_reduce_ncol_t2r = 32
        else:
            self.dQ_reduce_ncol = 32
            self.sdQaccum_stage = 64 // self.dQ_reduce_ncol
            self.dQ_reduce_ncol_t2r = self.dQ_reduce_ncol
    # The reduce-column width must evenly tile the per-CTA head dimension.
    assert (self.tile_hdim // self.cta_group_size) % self.dQ_reduce_ncol == 0
    self.dQaccum_reduce_stage = self.tile_hdim // self.dQ_reduce_ncol
    self.dQaccum_reduce_stage_t2r = self.tile_hdim // self.dQ_reduce_ncol_t2r
    # Deliberately disabled ("False and ..."): cluster-wide dQ reduction path.
    self.cluster_reduce_dQ = False and cute.size(self.cluster_shape_mn) > 1
    # number of tma reduce adds for dKacc and dVacc epilogue (must divide hdim_per_wg)
    self.dK_reduce_ncol = math.gcd(32, self.tile_hdim // 2)
    # CTA group for MMA operations
    self.cta_group = tcgen05.CtaGroup.TWO if self.use_2cta_instrs else tcgen05.CtaGroup.ONE
def _get_tiled_mma(self):
    """Build the five tcgen05 tiled-MMA objects used by the backward pass.

    Returns:
        Tuple ``(tiled_mma_S, tiled_mma_dP, tiled_mma_dK, tiled_mma_dV,
        tiled_mma_dQ)``, one per backward GEMM. Operand major modes follow
        the per-call comments; dV (and, unless ``use_smem_dS_for_mma_dK``,
        dK) read their A operand from TMEM rather than SMEM.
    """
    # S.T = K @ Q.T
    tiled_mma_S = sm100_utils_basic.make_trivial_tiled_mma(
        self.q_dtype,
        tcgen05.OperandMajorMode.K,
        tcgen05.OperandMajorMode.K,
        self.acc_dtype,
        self.cta_group,
        self.mma_tiler_kq[:2],
    )
    # dP.T = V @ dO.T
    tiled_mma_dP = sm100_utils_basic.make_trivial_tiled_mma(
        self.do_dtype,
        tcgen05.OperandMajorMode.K,
        tcgen05.OperandMajorMode.K,
        self.acc_dtype,
        self.cta_group,
        self.mma_tiler_vdo[:2],
    )
    # dV += P.T @ dO --> (K, MN) major
    tiled_mma_dV = sm100_utils_basic.make_trivial_tiled_mma(
        self.do_dtype,
        tcgen05.OperandMajorMode.K,  # P_major_mode
        tcgen05.OperandMajorMode.MN,  # dO_major_mode
        self.acc_dtype,
        self.cta_group,
        self.mma_tiler_pdo[:2],
        a_source=tcgen05.OperandSource.TMEM,
    )
    # dK += dS.T @ Q
    if const_expr(self.use_smem_dS_for_mma_dK):
        mma_dK_a_src = tcgen05.OperandSource.SMEM
    else:
        mma_dK_a_src = tcgen05.OperandSource.TMEM
    tiled_mma_dK = sm100_utils_basic.make_trivial_tiled_mma(
        self.do_dtype,
        tcgen05.OperandMajorMode.K,  # dS_major_mode
        tcgen05.OperandMajorMode.MN,  # Q_major_mode
        self.acc_dtype,
        self.cta_group,
        self.mma_tiler_dsq[:2],
        a_source=mma_dK_a_src,
    )
    # dQ = dS @ K
    tiled_mma_dQ = sm100_utils_basic.make_trivial_tiled_mma(
        self.k_dtype,
        tcgen05.OperandMajorMode.MN,  # dS_major_mode
        tcgen05.OperandMajorMode.MN,  # Kt_major_mode
        self.acc_dtype,
        self.cta_group,
        self.mma_tiler_dsk[:2],
    )
    return tiled_mma_S, tiled_mma_dP, tiled_mma_dK, tiled_mma_dV, tiled_mma_dQ
def _setup_smem_layout(self):
    """Construct all shared-memory layouts for MMA operands and epilogues.

    For each backward GEMM the A/B smem layouts are derived from the
    corresponding tiled MMA and tiler; single-stage layouts are sliced down
    from one-stage prototypes. Also builds the flat layouts for the LSE,
    dPsum, dQaccum and dS-exchange buffers and the dK/dV epilogue tiles.
    Must run after ``_get_tiled_mma`` and after dtypes are captured in
    ``__call__``.
    """
    # S.T = K @ Q.T
    sK_layout = sm100_utils_basic.make_smem_layout_a(
        self.tiled_mma_S,
        self.mma_tiler_kq,
        self.k_dtype,
        1,
    )
    # Drop the stage mode: K is single-buffered.
    self.sK_layout = cute.slice_(sK_layout, (None, None, None, 0))
    self.sQ_layout = sm100_utils_basic.make_smem_layout_b(
        self.tiled_mma_S,
        self.mma_tiler_kq,
        self.q_dtype,
        self.Q_stage,
    )
    # dP.T = V @ dO.T
    sV_layout = sm100_utils_basic.make_smem_layout_a(
        self.tiled_mma_dP,
        self.mma_tiler_vdo,
        self.v_dtype,
        1,
    )
    self.sV_layout = cute.slice_(sV_layout, (None, None, None, 0))
    self.sdOt_layout = sm100_utils_basic.make_smem_layout_b(
        self.tiled_mma_dP,
        self.mma_tiler_vdo,
        self.do_dtype,
        self.dO_stage,
    )
    # dV += P.T @ dO
    tP_layout = sm100_utils_basic.make_smem_layout_a(
        self.tiled_mma_dV,
        self.mma_tiler_pdo,
        self.do_dtype,
        1,
    )
    self.tP_layout = cute.slice_(tP_layout, (None, None, None, 0))
    self.sdO_layout = sm100_utils_basic.make_smem_layout_b(
        self.tiled_mma_dV,
        self.mma_tiler_pdo,
        self.do_dtype,
        self.dO_stage,
    )
    # dK += dS.T @ Q
    sdSt_layout = sm100_utils_basic.make_smem_layout_a(
        self.tiled_mma_dK,
        self.mma_tiler_dsq,
        self.ds_dtype,
        1,
    )
    self.sdSt_layout = cute.slice_(sdSt_layout, (None, None, None, 0))
    tdS_layout = sm100_utils_basic.make_smem_layout_a(
        self.tiled_mma_dK,
        self.mma_tiler_dsq,
        self.ds_dtype,
        1,
    )
    self.tdS_layout = cute.slice_(tdS_layout, (None, None, None, 0))
    self.sQt_layout = sm100_utils_basic.make_smem_layout_b(
        self.tiled_mma_dK,
        self.mma_tiler_dsq,
        self.q_dtype,
        self.Q_stage,
    )
    # dQ = dS @ K
    sdS_layout = sm100_utils_basic.make_smem_layout_a(
        self.tiled_mma_dQ,
        self.mma_tiler_dsk,
        self.ds_dtype,
        1,
    )
    self.sdS_layout = cute.slice_(sdS_layout, (None, None, None, 0))
    sKt_layout = sm100_utils_basic.make_smem_layout_b(
        self.tiled_mma_dQ,
        self.mma_tiler_dsk,
        self.k_dtype,
        1,
    )
    self.sKt_layout = cute.slice_(sKt_layout, (None, None, None, 0))
    # Buffer for exchanging half a dS tile between the two cluster CTAs.
    self.sdS_xchg_layout = cute.make_layout(shape=(self.tile_n, self.tile_m // 2))
    self.sdQaccum_layout = cute.make_layout(
        (self.tile_m * self.dQ_reduce_ncol, self.sdQaccum_stage)
    )
    # LSE/dPsum rows are padded to a multiple of 64 between stages.
    self.sLSE_layout = cute.make_layout(
        shape=(self.tile_m, self.Q_stage), stride=(1, cute.round_up(self.tile_m, 64))
    )
    self.sdPsum_layout = cute.make_layout(
        shape=(self.tile_m, self.dO_stage),
        stride=(1, cute.round_up(self.tile_m, 64)),
    )
    self.sdK_epi_tile = (
        self.tile_n,
        math.gcd(128 // (self.dk_dtype.width // 8), self.tile_hdim // 2),  # 64 or 32
    )  # subtiles mma_tiler_dsq[:2] = mma_tiler_pdo[:2]
    self.sdV_epi_tile = (
        self.tile_n,
        math.gcd(128 // (self.dk_dtype.width // 8), self.tile_hdimv // 2),  # 64 or 32
    )  # subtiles mma_tiler_dsq[:2] = mma_tiler_pdo[:2]
    # headdim_64 gets 1 stage
    self.num_epi_stages = max(1, (self.tile_hdim // 2) // self.sdK_epi_tile[1])
    self.num_epi_stages_v = max(1, (self.tile_hdimv // 2) // self.sdV_epi_tile[1])
    self.sdK_flat_epi_tile = self.tile_n * (self.tile_hdim // 2) // self.num_epi_stages
    self.sdV_flat_epi_tile = self.tile_n * (self.tile_hdimv // 2) // self.num_epi_stages_v
    if const_expr(not self.dKV_postprocess):
        self.sdK_layout = sm100_utils_basic.make_smem_layout_epi(
            self.dk_dtype,
            LayoutEnum.ROW_MAJOR,
            self.sdK_epi_tile,
            2,  # num compute wgs
        )
        self.sdV_layout = sm100_utils_basic.make_smem_layout_epi(
            self.dv_dtype,
            LayoutEnum.ROW_MAJOR,
            self.sdV_epi_tile,
            2,  # num compute wgs
        )
    else:
        # GQA postprocess path: flat float accumulator staging buffers.
        self.sdK_layout = cute.make_layout((self.tile_n * self.dK_reduce_ncol, 2))
        # self.dK_reduce_ncol same for dV
        self.sdV_layout = cute.make_layout((self.tile_n * self.dK_reduce_ncol, 2))
@cute.jit
def __call__(
    self,
    mQ: cute.Tensor,
    mK: cute.Tensor,
    mV: cute.Tensor,
    mdO: cute.Tensor,
    mLSE: cute.Tensor,
    mdPsum: cute.Tensor,
    mdQaccum: cute.Tensor,
    mdK: cute.Tensor,
    mdV: cute.Tensor,
    softmax_scale: Float32,
    stream: cuda.CUstream,
    mCuSeqlensQ: Optional[cute.Tensor] = None,
    mCuSeqlensK: Optional[cute.Tensor] = None,
    mSeqUsedQ: Optional[cute.Tensor] = None,
    mSeqUsedK: Optional[cute.Tensor] = None,
    softcap: Float32 | float | None = None,
    window_size_left: Int32 | int | None = None,
    window_size_right: Int32 | int | None = None,
    mdQ_semaphore: Optional[cute.Tensor] = None,
    mdK_semaphore: Optional[cute.Tensor] = None,
    mdV_semaphore: Optional[cute.Tensor] = None,
    aux_tensors: Optional[list] = None,
    # Block-sparse tensors (Q direction - for iterating m_blocks per n_block):
    blocksparse_tensors: Optional[BlockSparseTensors] = None,
):
    """Host-side entry point: prepare layouts/TMA atoms and launch the kernel.

    Captures element dtypes from the input tensors, permutes the global
    tensors into the layouts the device kernel expects, builds smem layouts
    and TMA copy atoms (including the extra transposed-operand atoms for the
    2-CTA path), selects a tile scheduler, defines the SharedStorage struct,
    and finally launches ``self.kernel`` on ``stream``.
    """
    # Capture dtypes; dS is produced in the same precision as Q.
    self.q_dtype = mQ.element_type
    self.k_dtype = mK.element_type
    self.v_dtype = mV.element_type
    self.do_dtype = mdO.element_type
    self.lse_dtype = mLSE.element_type
    self.dpsum_dtype = mdPsum.element_type
    self.dqaccum_dtype = mdQaccum.element_type
    self.dk_dtype = mdK.element_type
    self.dv_dtype = mdV.element_type
    self.ds_dtype = self.q_dtype
    self.is_varlen_k = mCuSeqlensK is not None or mSeqUsedK is not None
    self.is_varlen_q = mCuSeqlensQ is not None or mSeqUsedQ is not None
    self.use_tma_store = not (self.qhead_per_kvhead == 1 and mCuSeqlensK is not None)
    # self.use_tma_store = not self.qhead_per_kvhead == 1
    # GQA requires a float dK/dV accumulation + postprocess pass.
    self.dKV_postprocess = self.qhead_per_kvhead > 1
    if const_expr(self.dKV_postprocess):
        assert self.dk_dtype.width == 32, "Must accumulate dK in float precision for GQA"
        assert self.dv_dtype.width == 32, "Must accumulate dV in float precision for GQA"
    mdQaccum, mdK, mdV = [assume_tensor_aligned(t) for t in (mdQaccum, mdK, mdV)]
    # (b, s, n, h) --> (s, h, n, b) or (t, n, h) -> (t, h, n)
    QO_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensQ is None) else [0, 2, 1]
    mQ, mdO = [layout_utils.select(t, mode=QO_layout_transpose) for t in (mQ, mdO)]
    KV_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensK is None) else [0, 2, 1]
    mK, mV = [layout_utils.select(t, mode=KV_layout_transpose) for t in (mK, mV)]
    # (b, n, s) --> (s, n, b) or (n, t) --> (t, n)
    LSE_dPsum_dQaccum_transpose = [2, 1, 0] if const_expr(mCuSeqlensQ is None) else [1, 0]
    mLSE, mdPsum, mdQaccum = [
        layout_utils.select(t, mode=LSE_dPsum_dQaccum_transpose)
        for t in (mLSE, mdPsum, mdQaccum)
    ]
    if const_expr(not self.dKV_postprocess):
        layout_dKV_transpose = KV_layout_transpose
    else:
        layout_dKV_transpose = [2, 1, 0] if const_expr(mCuSeqlensK is None) else [1, 0]
    mdK, mdV = [layout_utils.select(t, mode=layout_dKV_transpose) for t in (mdK, mdV)]
    # (s, h, n, b) --> (h, s, n, b) or (t, h, n) -> (h, t, b)
    dO_transpose = [1, 0, 2, 3] if const_expr(mCuSeqlensQ is None) else [1, 0, 2]
    mdO = layout_utils.select(mdO, mode=dO_transpose)
    # Transposes for 2-CTA K/Q paths (Q follows Q seqlens, K follows K seqlens)
    transpose_sh_q = dO_transpose
    transpose_sh_k = [1, 0, 2, 3] if const_expr(mCuSeqlensK is None) else [1, 0, 2]
    # (b, n, block, stage) -> (block, stage, n, b)
    semaphore_transpose = [2, 3, 1, 0]
    if const_expr(self.deterministic):
        assert mdQ_semaphore is not None
        mdQ_semaphore = layout_utils.select(mdQ_semaphore, mode=semaphore_transpose)
    if const_expr(self.deterministic and self.qhead_per_kvhead > 1):
        assert mdK_semaphore is not None
        assert mdV_semaphore is not None
        mdK_semaphore, mdV_semaphore = [
            layout_utils.select(t, mode=semaphore_transpose)
            for t in (mdK_semaphore, mdV_semaphore)
        ]
    else:
        mdK_semaphore = None
        mdV_semaphore = None
    # Stage counts and tiled MMAs must exist before smem layouts are built.
    self._setup_attributes()
    (
        self.tiled_mma_S,
        self.tiled_mma_dP,
        self.tiled_mma_dK,
        self.tiled_mma_dV,
        self.tiled_mma_dQ,
    ) = self._get_tiled_mma()
    self._setup_smem_layout()
    self.cluster_shape_mnk = (*self.cluster_shape_mn, 1)
    self.cluster_layout_vmnk = cute.tiled_divide(
        cute.make_layout(self.cluster_shape_mnk),
        (self.tiled_mma_S.thr_id.shape,),
    )
    self.num_mcast_ctas_b = cute.size(self.cluster_layout_vmnk.shape[1])
    self.is_q_do_mcast = self.num_mcast_ctas_b > 1
    if const_expr(not self.dKV_postprocess):
        self.mdK_layout_enum = LayoutEnum.from_tensor(mdK)
        self.mdV_layout_enum = LayoutEnum.from_tensor(mdV)
        dK_major_mode = self.mdK_layout_enum.mma_major_mode()
        dV_major_mode = self.mdV_layout_enum.mma_major_mode()
        if const_expr(dK_major_mode != tcgen05.OperandMajorMode.K):
            raise RuntimeError("The layout of mdK is wrong")
        if const_expr(dV_major_mode != tcgen05.OperandMajorMode.K):
            raise RuntimeError("The layout of mdV is wrong")
    # TMA store atoms for the dK/dV epilogue (non-GQA path only).
    if const_expr(self.use_tma_store and not self.dKV_postprocess):
        tma_copy_op_dKV = cpasync.CopyBulkTensorTileS2GOp()
        tma_atom_dK, mdK_tma_tensor = cpasync.make_tiled_tma_atom(
            tma_copy_op_dKV,
            mdK,
            cute.select(self.sdK_layout, mode=[0, 1]),
            self.sdK_epi_tile,
            1,  # no mcast
        )
        tma_atom_dV, mdV_tma_tensor = cpasync.make_tiled_tma_atom(
            tma_copy_op_dKV,
            mdV,
            cute.select(self.sdV_layout, mode=[0, 1]),
            self.sdV_epi_tile,
            1,  # no mcast
        )
    else:
        mdV_tma_tensor = mdV
        mdK_tma_tensor = mdK
        tma_atom_dV = None
        tma_atom_dK = None
    if const_expr(not self.dKV_postprocess):
        thr_layout_r2s_dKV = cute.make_ordered_layout((128, 1), order=(1, 0))  # 128 threads
        val_layout_r2s_dKV = cute.make_ordered_layout(
            (1, 128 // self.dk_dtype.width), order=(1, 0)
        )  # 4 or 8 vals for 16 byte store
        copy_atom_r2s_dKV = cute.make_copy_atom(
            cute.nvgpu.CopyUniversalOp(),
            self.dk_dtype,
            num_bits_per_copy=128,
        )
        tiled_copy_r2s_dKV = cute.make_tiled_copy_tv(
            copy_atom_r2s_dKV, thr_layout_r2s_dKV, val_layout_r2s_dKV
        )
    else:
        tiled_copy_r2s_dKV = copy_utils.tiled_copy_1d(
            Float32, 128, num_copy_elems=128 // Float32.width
        )
    # TMA load atoms for the MMA operands.
    tma_load_op = cpasync.CopyBulkTensorTileG2SOp(self.cta_group)
    # S.T = K @ Q.T
    tma_atom_K, tma_tensor_K = cute.nvgpu.make_tiled_tma_atom_A(
        tma_load_op,
        mK,
        cute.select(self.sK_layout, mode=[0, 1, 2]),
        self.mma_tiler_kq,
        self.tiled_mma_S,
        self.cluster_layout_vmnk.shape,
    )
    Q_tma_op = sm100_utils_basic.cluster_shape_to_tma_atom_B(
        self.cluster_shape_mnk, self.tiled_mma_S.thr_id
    )
    tma_atom_Q, tma_tensor_Q = cute.nvgpu.make_tiled_tma_atom_B(
        Q_tma_op,
        mQ,
        cute.select(self.sQ_layout, mode=[0, 1, 2]),
        self.mma_tiler_kq,
        self.tiled_mma_S,
        self.cluster_layout_vmnk.shape,
    )
    # dP.T = V @ dO.T
    tma_atom_V, tma_tensor_V = cute.nvgpu.make_tiled_tma_atom_A(
        tma_load_op,
        mV,
        cute.select(self.sV_layout, mode=[0, 1, 2]),
        self.mma_tiler_vdo,
        self.tiled_mma_dP,
        self.cluster_layout_vmnk.shape,
    )
    # dV = P.T @ dO
    dO_tma_op = sm100_utils_basic.cluster_shape_to_tma_atom_B(
        self.cluster_shape_mnk, self.tiled_mma_dV.thr_id
    )
    tma_atom_dO, tma_tensor_dO = cute.nvgpu.make_tiled_tma_atom_B(
        dO_tma_op,
        mdO,
        cute.select(self.sdO_layout, mode=[0, 1, 2]),
        self.mma_tiler_pdo,
        self.tiled_mma_dV,
        self.cluster_layout_vmnk.shape,
    )
    # ------------------------------------------------------------
    # 2-CTA
    # ------------------------------------------------------------
    tma_atom_dOt = tma_tensor_dOt = None
    if const_expr(self.use_2cta_instrs):
        tma_atom_dOt, tma_tensor_dOt = cute.nvgpu.make_tiled_tma_atom_B(
            dO_tma_op,
            layout_utils.select(mdO, mode=transpose_sh_q),
            cute.select(self.sdOt_layout, mode=[0, 1, 2]),
            self.mma_tiler_vdo,
            self.tiled_mma_dP,
            self.cluster_layout_vmnk.shape,
        )
    tma_atom_Qt = tma_tensor_Qt = None
    if const_expr(self.use_2cta_instrs):
        tma_atom_Qt, tma_tensor_Qt = cute.nvgpu.make_tiled_tma_atom_B(
            Q_tma_op,
            layout_utils.select(mQ, mode=transpose_sh_q),
            cute.select(self.sQt_layout, mode=[0, 1, 2]),
            self.mma_tiler_dsq,
            self.tiled_mma_dK,
            self.cluster_layout_vmnk.shape,
        )
    tma_atom_Kt = tma_tensor_Kt = None
    if const_expr(self.use_2cta_instrs):
        Kt_tma_op = sm100_utils_basic.cluster_shape_to_tma_atom_B(
            self.cluster_shape_mnk, self.tiled_mma_dQ.thr_id
        )
        tma_atom_Kt, tma_tensor_Kt = cute.nvgpu.make_tiled_tma_atom_B(
            Kt_tma_op,
            layout_utils.select(mK, mode=transpose_sh_k),
            cute.select(self.sKt_layout, mode=[0, 1, 2]),
            self.mma_tiler_dsk,
            self.tiled_mma_dQ,
            self.cluster_layout_vmnk.shape,
        )
    # Expected-bytes counts used to arm the TMA transaction barriers.
    self.tma_copy_bytes = {
        name: self.cta_group_size
        * cute.size_in_bytes(mX.element_type, cute.select(layout, mode=[0, 1, 2]))
        for name, mX, layout in [
            ("Q", mQ, self.sQ_layout),
            ("K", mK, self.sK_layout),
            ("V", mV, self.sV_layout),
            ("dO", mdO, self.sdO_layout),
        ]
    }
    self.tma_copy_bytes["LSE"] = self.tile_m * Float32.width // 8
    self.tma_copy_bytes["dPsum"] = self.tile_m * Float32.width // 8
    self.tma_copy_bytes["dQ"] = self.tile_m * self.dQ_reduce_ncol * Float32.width // 8
    self.tma_copy_bytes["dKacc"] = self.tile_n * self.dK_reduce_ncol * Float32.width // 8
    self.tma_copy_bytes["dS"] = cute.size_in_bytes(self.ds_dtype, self.sdS_layout)
    self.tma_copy_bytes["sdS_xchg"] = self.tma_copy_bytes["dS"] // 2  # Half of dS for exchange
    # TileScheduler = SingleTileScheduler
    if const_expr(self.is_varlen_k):
        TileScheduler = SingleTileVarlenScheduler
    elif const_expr(self.deterministic):
        TileScheduler = SingleTileLPTBwdScheduler
    else:
        TileScheduler = SingleTileScheduler
    self.spt = (self.is_causal or self.is_local) and self.deterministic
    tile_sched_args = TileSchedulerArguments(
        cute.ceil_div(cute.size(mK.shape[0]), self.cta_tiler[0]),  # num_blocks
        cute.size(mQ.shape[2]),  # num_heads = num_query_heads
        cute.size(mK.shape[3])
        if const_expr(mCuSeqlensK is None)
        else cute.size(mCuSeqlensK.shape[0] - 1),  # num_batches
        1,  # num_splits
        cute.size(mQ.shape[0]),  # pass seqlen_q or total_q for seqlen_k
        mQ.shape[1],  # headdim
        mV.shape[1],  # headdim_v
        total_q=cute.size(mK.shape[0])  # pass total_k for total_q
        if const_expr(mCuSeqlensK is not None)
        else cute.size(mK.shape[0]) * cute.size(mK.shape[3]),
        tile_shape_mn=self.cta_tiler[:2],  # (tile_n, tile_m)
        cluster_shape_mn=self.cluster_shape_mnk[:2],
        mCuSeqlensQ=mCuSeqlensK,
        mSeqUsedQ=mSeqUsedK,
        qhead_per_kvhead_packgqa=1,  # pack_gqa disabled for bwd
        element_size=self.k_dtype.width // 8,
        is_persistent=self.is_persistent,  # persistent mode not tested
        lpt=self.spt,
        head_swizzle=self.deterministic,
    )
    tile_sched_params = TileScheduler.to_underlying_arguments(tile_sched_args)
    self.tile_scheduler_cls = TileScheduler
    grid_dim = TileScheduler.get_grid_shape(tile_sched_params)
    # Compute allocation sizes for shared buffers that are reused
    # sQ is reused for sdK, sdO is reused for sdV
    sQ_alloc_bytes = max(
        cute.size_in_bytes(self.q_dtype, self.sQ_layout),
        cute.size_in_bytes(self.dk_dtype, self.sdK_layout),
    )
    sdO_alloc_bytes = max(
        cute.size_in_bytes(self.dv_dtype, self.sdV_layout),
        cute.size_in_bytes(self.do_dtype, self.sdO_layout),
    )
    sdK_bytes = cute.size_in_bytes(self.dk_dtype, self.sdK_layout)
    sdV_bytes = cute.size_in_bytes(self.dv_dtype, self.sdV_layout)
    assert sdV_bytes <= sdO_alloc_bytes, "sdV doesn't fit in sdO storage allocation"
    assert sdK_bytes <= sQ_alloc_bytes, "sdK doesn't fit in sQ storage allocation"
    # 2-CTA: sdV reuses sV, sdK reuses sK
    sV_bytes = cute.size_in_bytes(self.v_dtype, self.sV_layout)
    sK_bytes = cute.size_in_bytes(self.k_dtype, self.sK_layout)
    if const_expr(self.use_2cta_instrs):
        assert sdV_bytes <= sV_bytes, "sdV doesn't fit in sV storage allocation (2-CTA)"
        assert sdK_bytes <= sK_bytes, "sdK doesn't fit in sK storage allocation (2-CTA)"
    if const_expr(self.use_2cta_instrs):
        # hdim 192 does not allocate the transposed/exchange buffers.
        sQt_size = cute.cosize(self.sQt_layout) if const_expr(self.tile_hdim <= 128) else 0
        sdOt_size = cute.cosize(self.sdOt_layout) if const_expr(self.tile_hdim <= 128) else 0
        sdS_xchg_size = (
            cute.cosize(self.sdS_xchg_layout) if const_expr(self.tile_hdim <= 128) else 0
        )

        @cute.struct
        class SharedStorage:
            Q_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.Q_stage]
            dO_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.dO_stage]
            LSE_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.Q_stage]
            dPsum_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.dO_stage]
            S_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dP_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dS_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dKV_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.sdKVaccum_stage]
            dQ_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2]
            dQ_cluster_full_mbar_ptr: cute.struct.MemRange[
                cutlass.Int64, self.dQaccum_reduce_stage // 2
            ]
            dQ_cluster_empty_mbar_ptr: cute.struct.MemRange[
                cutlass.Int64, self.dQaccum_reduce_stage // 2
            ]
            tmem_holding_buf: Int32
            tmem_dealloc_mbar_ptr: cutlass.Int64
            # 2-CTA
            Qt_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.Q_stage]
            Kt_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dS_cluster_empty_mbar_ptr: cutlass.Int64
            dS_cluster_full_mbar_ptr: cutlass.Int64
            dS_cluster_leader_mbar_ptr: cutlass.Int64
            dQaccum_empty_mbar_ptr: cutlass.Int64
            sQ: cute.struct.Align[
                cute.struct.MemRange[self.q_dtype, cute.cosize(self.sQ_layout)],
                self.buffer_align_bytes,
            ]
            sK: cute.struct.Align[
                cute.struct.MemRange[self.k_dtype, cute.cosize(self.sK_layout)],
                self.buffer_align_bytes,
            ]
            sV: cute.struct.Align[
                cute.struct.MemRange[self.v_dtype, cute.cosize(self.sV_layout)],
                self.buffer_align_bytes,
            ]
            sdO: cute.struct.Align[
                cute.struct.MemRange[self.do_dtype, cute.cosize(self.sdO_layout)],
                self.buffer_align_bytes,
            ]
            sQt: cute.struct.Align[
                cute.struct.MemRange[self.q_dtype, sQt_size],
                self.buffer_align_bytes,
            ]
            sdOt: cute.struct.Align[
                cute.struct.MemRange[self.do_dtype, sdOt_size],
                self.buffer_align_bytes,
            ]
            sdS_xchg: cute.struct.Align[
                cute.struct.MemRange[self.ds_dtype, sdS_xchg_size],
                self.buffer_align_bytes,
            ]
            sKt: cute.struct.Align[
                cute.struct.MemRange[self.k_dtype, cute.cosize(self.sKt_layout)],
                self.buffer_align_bytes,
            ]
            sdS: cute.struct.Align[
                cute.struct.MemRange[self.ds_dtype, cute.cosize(self.sdSt_layout)],
                self.buffer_align_bytes,
            ]
            sLSE: cute.struct.Align[
                cute.struct.MemRange[self.lse_dtype, cute.cosize(self.sLSE_layout)],
                128,
            ]
            sdPsum: cute.struct.Align[
                cute.struct.MemRange[self.dpsum_dtype, cute.cosize(self.sdPsum_layout)],
                128,
            ]
            sdQaccum: cute.struct.Align[
                cute.struct.MemRange[self.dqaccum_dtype, cute.cosize(self.sdQaccum_layout)],
                self.buffer_align_bytes if sdS_xchg_size == 0 else 128,
            ]
    else:

        @cute.struct
        class SharedStorage:
            Q_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.Q_stage]
            dO_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.dO_stage]
            LSE_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.Q_stage]
            dPsum_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.dO_stage]
            S_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dP_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dS_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.single_stage]
            dKV_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2 * self.sdKVaccum_stage]
            dQ_mbar_ptr: cute.struct.MemRange[cutlass.Int64, 2]
            dQ_cluster_full_mbar_ptr: cute.struct.MemRange[
                cutlass.Int64, self.dQaccum_reduce_stage // 2
            ]
            dQ_cluster_empty_mbar_ptr: cute.struct.MemRange[
                cutlass.Int64, self.dQaccum_reduce_stage // 2
            ]
            tmem_holding_buf: Int32
            tmem_dealloc_mbar_ptr: Int64
            sQ: cute.struct.Align[
                cute.struct.MemRange[cute.Uint8, sQ_alloc_bytes],
                self.buffer_align_bytes,
            ]
            sK: cute.struct.Align[
                cute.struct.MemRange[self.k_dtype, cute.cosize(self.sK_layout)],
                self.buffer_align_bytes,
            ]
            sV: cute.struct.Align[
                cute.struct.MemRange[self.v_dtype, cute.cosize(self.sV_layout)],
                self.buffer_align_bytes,
            ]
            sdO: cute.struct.Align[
                cute.struct.MemRange[cute.Uint8, sdO_alloc_bytes],
                self.buffer_align_bytes,
            ]
            sdS: cute.struct.Align[
                cute.struct.MemRange[self.ds_dtype, cute.cosize(self.sdSt_layout)],
                128,
            ]
            sLSE: cute.struct.Align[
                cute.struct.MemRange[self.lse_dtype, cute.cosize(self.sLSE_layout)],
                128,
            ]
            sdPsum: cute.struct.Align[
                cute.struct.MemRange[self.dpsum_dtype, cute.cosize(self.sdPsum_layout)],
                128,
            ]
            sdQaccum: cute.struct.Align[
                cute.struct.MemRange[self.dqaccum_dtype, cute.cosize(self.sdQaccum_layout)],
                self.buffer_align_bytes,
            ]
    self.shared_storage = SharedStorage
    LOG2_E = math.log2(math.e)
    if const_expr(self.score_mod is None):
        # Without score_mod: bake scale into log2
        softmax_scale_log2 = softmax_scale * LOG2_E
    else:
        # With score_mod: score_mod applied to S * softmax_scale, then use LOG2_E only
        softmax_scale_log2 = LOG2_E
    if const_expr(window_size_left is not None):
        window_size_left = Int32(window_size_left)
    if const_expr(window_size_right is not None):
        window_size_right = Int32(window_size_right)
    fastdiv_mods = None
    if const_expr(aux_tensors is not None):
        seqlen_q = cute.size(mQ.shape[0]) // (
            self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1
        )
        seqlen_k = cute.size(mK.shape[0])
        seqlen_q_divmod = FastDivmodDivisor(seqlen_q)
        seqlen_k_divmod = FastDivmodDivisor(seqlen_k)
        fastdiv_mods = (seqlen_q_divmod, seqlen_k_divmod)
    self.use_block_sparsity = cutlass.const_expr(blocksparse_tensors is not None)
    if const_expr(self.use_2cta_instrs):
        assert blocksparse_tensors is None, (
            "2-CTA mode does not support block sparsity. "
            "Please create kernel with use_2cta_instrs=False for block sparse attention."
        )
        assert window_size_left is None and window_size_right is None, (
            "2-CTA mode does not support window attention. "
            "Please create kernel with use_2cta_instrs=False for window attention."
        )
    # 2-CTA: 231424 and 1-CTA: 232448
    # print("SMEM: ", self.shared_storage.size_in_bytes())
    if const_expr(self.use_block_sparsity or aux_tensors is not None):
        assert all(x is None for x in (mCuSeqlensQ, mCuSeqlensK, mSeqUsedQ, mSeqUsedK)), (
            "Variable sequence length is not supported yet for blocksparse or aux tensors in bwd"
        )
    self.kernel(
        tma_tensor_Q,
        tma_tensor_Qt,
        tma_tensor_K,
        tma_tensor_Kt,
        tma_tensor_V,
        mLSE,
        mdPsum,
        tma_tensor_dO,
        tma_tensor_dOt,
        mdV,
        mdK,
        mdQaccum,
        mdV_tma_tensor,
        mdK_tma_tensor,
        mdQ_semaphore,
        mdK_semaphore,
        mdV_semaphore,
        mCuSeqlensQ,
        mCuSeqlensK,
        mSeqUsedQ,
        mSeqUsedK,
        tma_atom_Q,
        tma_atom_Qt,
        tma_atom_K,
        tma_atom_Kt,
        tma_atom_V,
        tma_atom_dO,
        tma_atom_dOt,
        tma_atom_dV,
        tma_atom_dK,
        self.sQ_layout,
        self.sQt_layout,
        self.sK_layout,
        self.sKt_layout,
        self.sV_layout,
        self.sLSE_layout,
        self.sdPsum_layout,
        self.sdO_layout,
        self.sdOt_layout,
        self.sdSt_layout,
        self.sdS_layout,
        self.sdS_xchg_layout,
        self.sdQaccum_layout,
        self.sdK_layout,
        self.sdV_layout,
        self.tP_layout,
        self.tdS_layout,
        self.tiled_mma_S,
        self.tiled_mma_dP,
        self.tiled_mma_dV,
        self.tiled_mma_dK,
        self.tiled_mma_dQ,
        tiled_copy_r2s_dKV,
        softmax_scale,
        softmax_scale_log2,
        window_size_left,
        window_size_right,
        tile_sched_params,
        aux_tensors,
        fastdiv_mods,
        blocksparse_tensors,
    ).launch(
        grid=grid_dim,
        block=[self.threads_per_cta, 1, 1],
        cluster=self.cluster_shape_mnk if cute.size(self.cluster_shape_mnk) > 1 else None,
        smem=self.shared_storage.size_in_bytes(),
        stream=stream,
        min_blocks_per_mp=1,
    )
    @cute.kernel
    def kernel(
        self,
        mQ: cute.Tensor,
        mQt: Optional[cute.Tensor],
        mK: cute.Tensor,
        mKt: Optional[cute.Tensor],
        mV: cute.Tensor,
        mLSE: cute.Tensor,
        mdPsum: cute.Tensor,
        mdO: cute.Tensor,
        mdOt: Optional[cute.Tensor],
        mdV: cute.Tensor,
        mdK: cute.Tensor,
        mdQaccum: cute.Tensor,
        mdV_tma_tensor: Optional[cute.Tensor],
        mdK_tma_tensor: Optional[cute.Tensor],
        mdQ_semaphore: Optional[cute.Tensor],
        mdK_semaphore: Optional[cute.Tensor],
        mdV_semaphore: Optional[cute.Tensor],
        mCuSeqlensQ: Optional[cute.Tensor],
        mCuSeqlensK: Optional[cute.Tensor],
        mSeqUsedQ: Optional[cute.Tensor],
        mSeqUsedK: Optional[cute.Tensor],
        tma_atom_Q: cute.CopyAtom,
        tma_atom_Qt: Optional[cute.CopyAtom],
        tma_atom_K: cute.CopyAtom,
        tma_atom_Kt: Optional[cute.CopyAtom],
        tma_atom_V: cute.CopyAtom,
        tma_atom_dO: cute.CopyAtom,
        tma_atom_dOt: Optional[cute.CopyAtom],
        tma_atom_dV: Optional[cute.CopyAtom],
        tma_atom_dK: Optional[cute.CopyAtom],
        sQ_layout: cute.ComposedLayout,
        sQt_layout: cute.ComposedLayout,
        sK_layout: cute.ComposedLayout,
        sKt_layout: cute.ComposedLayout,
        sV_layout: cute.ComposedLayout,
        sLSE_layout: cute.Layout,
        sdPsum_layout: cute.Layout,
        sdO_layout: cute.ComposedLayout,
        sdOt_layout: cute.ComposedLayout,
        sdSt_layout: cute.ComposedLayout,
        sdS_layout: cute.ComposedLayout,
        sdS_xchg_layout: cute.Layout,
        sdQaccum_layout: cute.Layout,
        sdK_layout: cute.ComposedLayout | cute.Layout,
        sdV_layout: cute.ComposedLayout | cute.Layout,
        tP_layout: cute.ComposedLayout,
        tdS_layout: cute.ComposedLayout,
        tiled_mma_S: cute.TiledMma,
        tiled_mma_dP: cute.TiledMma,
        tiled_mma_dV: cute.TiledMma,
        tiled_mma_dK: cute.TiledMma,
        tiled_mma_dQ: cute.TiledMma,
        tiled_copy_r2s_dKV: cute.TiledCopy,
        softmax_scale: cutlass.Float32,
        softmax_scale_log2: cutlass.Float32,
        window_size_left: Optional[Int32],
        window_size_right: Optional[Int32],
        tile_sched_params: ParamsBase,
        aux_tensors: Optional[list] = None,
        fastdiv_mods=(None, None),
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
    ):
        """Warp-specialized attention-backward device kernel.

        Each CTA (or 2-CTA pair when ``use_2cta_instrs``) is assigned a KV tile
        by the tile scheduler and iterates over the Q tiles that interact with
        it.  Warps are specialized by ``warp_idx`` into roles (load / MMA /
        compute / reduce / relay / empty) that communicate through the
        mbarrier-backed pipelines constructed below.  This function only sets
        up shared state and dispatches to the per-role methods
        (``self.load`` / ``self.mma`` / ``self.compute_loop`` /
        ``self.dQacc_reduce`` / ``self.relay``).
        """
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        bidx, _, _ = cute.arch.block_idx()
        # Position of this CTA within its MMA pair; rank 0 is the "leader" CTA
        # for 2-CTA UMMA instructions (cta_group_size == 1 makes every CTA a leader).
        mma_tile_coord_v = bidx % self.cta_group_size
        is_leader_cta = mma_tile_coord_v == 0
        cta_rank_in_cluster = cute.arch.make_warp_uniform(cute.arch.block_idx_in_cluster())
        # Prefetch tma descriptor
        if warp_idx == self.load_warp_id:
            with cute.arch.elect_one():
                cpasync.prefetch_descriptor(tma_atom_Q)
                if const_expr(tma_atom_Qt is not None):
                    cpasync.prefetch_descriptor(tma_atom_Qt)
                cpasync.prefetch_descriptor(tma_atom_K)
                if const_expr(tma_atom_Kt is not None):
                    cpasync.prefetch_descriptor(tma_atom_Kt)
                cpasync.prefetch_descriptor(tma_atom_V)
                if const_expr(tma_atom_dOt is not None):
                    cpasync.prefetch_descriptor(tma_atom_dOt)
                cpasync.prefetch_descriptor(tma_atom_dO)
                if const_expr(tma_atom_dV is not None):
                    cpasync.prefetch_descriptor(tma_atom_dV)
                if const_expr(tma_atom_dK is not None):
                    cpasync.prefetch_descriptor(tma_atom_dK)
        # Cluster layout in (V, M, N, K) coordinates, shared by the pipelines below.
        cluster_layout_vmnk = cute.tiled_divide(
            cute.make_layout(self.cluster_shape_mnk),
            (tiled_mma_S.thr_id.shape,),
        )
        # Alloc
        smem = cutlass.utils.SmemAllocator()
        storage = smem.allocate(self.shared_storage)
        dQ_cluster_full_mbar_ptr = storage.dQ_cluster_full_mbar_ptr.data_ptr()
        dQ_cluster_empty_mbar_ptr = storage.dQ_cluster_empty_mbar_ptr.data_ptr()
        # Cross-CTA dS exchange barriers only exist in 2-CTA mode.
        if const_expr(self.use_2cta_instrs):
            dS_cluster_full_mbar_ptr = storage.dS_cluster_full_mbar_ptr
            dS_cluster_empty_mbar_ptr = storage.dS_cluster_empty_mbar_ptr
            dS_cluster_leader_mbar_ptr = storage.dS_cluster_leader_mbar_ptr
            dQaccum_empty_mbar_ptr = storage.dQaccum_empty_mbar_ptr
        else:
            dS_cluster_full_mbar_ptr = None
            dS_cluster_empty_mbar_ptr = None
            dS_cluster_leader_mbar_ptr = None
            dQaccum_empty_mbar_ptr = None
        # Barrier initialization
        # NOTE(review): each mbarrier is initialized by a single designated warp
        # (warp 2 / warp 4); presumably a later cluster/CTA sync makes these
        # visible before first use — confirm the sync point in the role methods.
        if const_expr(self.use_2cta_instrs):
            if const_expr(self.tile_hdim == 192):
                if warp_idx == 2:
                    cute.arch.mbarrier_init(
                        dQaccum_empty_mbar_ptr,
                        len(self.reduce_warp_ids),
                    )
            if warp_idx == 4:
                cute.arch.mbarrier_init(dS_cluster_full_mbar_ptr, 1)
                cute.arch.mbarrier_init(dS_cluster_empty_mbar_ptr, 1)
                cute.arch.mbarrier_init(dS_cluster_leader_mbar_ptr, 2)
            if const_expr(self.cluster_reduce_dQ):
                if warp_idx == 4:
                    for i in range(self.dQaccum_reduce_stage // 2):
                        cute.arch.mbarrier_init(dQ_cluster_full_mbar_ptr + i, 1)
                        cute.arch.mbarrier_init(dQ_cluster_empty_mbar_ptr + i, 1)
        # Named barrier used by the TMEM allocator so MMA/compute/reduce warps all
        # retrieve the same TMEM pointer after allocation.
        tmem_alloc_barrier = cutlass.pipeline.NamedBarrier(
            barrier_id=int(NamedBarrierBwdSm100.TmemPtr),
            num_threads=cute.arch.WARP_SIZE
            * len((self.mma_warp_id, *self.compute_warp_ids, *self.reduce_warp_ids)),
        )
        tmem = cutlass.utils.TmemAllocator(
            storage.tmem_holding_buf,
            barrier_for_retrieve=tmem_alloc_barrier,
            allocator_warp_id=self.mma_warp_id,
            is_two_cta=self.use_2cta_instrs,
            two_cta_tmem_dealloc_mbar_ptr=storage.tmem_dealloc_mbar_ptr,
        )
        # UMMA producers and AsyncThread consumers
        pipeline_producer_group_MMA_AsyncThread = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, len([self.mma_warp_id])
        )
        pipeline_consumer_group_MMA_AsyncThread = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, len(self.compute_warp_ids) * self.cta_group_size
        )
        # S / dP accumulators: MMA warp produces, compute warps consume.
        pipeline_S_P = cutlass.pipeline.PipelineUmmaAsync.create(
            num_stages=1,
            producer_group=pipeline_producer_group_MMA_AsyncThread,
            consumer_group=pipeline_consumer_group_MMA_AsyncThread,
            barrier_storage=storage.S_mbar_ptr.data_ptr(),
            cta_layout_vmnk=cluster_layout_vmnk,
        )
        pipeline_dP = cutlass.pipeline.PipelineUmmaAsync.create(
            num_stages=1,
            producer_group=pipeline_producer_group_MMA_AsyncThread,
            consumer_group=pipeline_consumer_group_MMA_AsyncThread,
            barrier_storage=storage.dP_mbar_ptr.data_ptr(),
            cta_layout_vmnk=cluster_layout_vmnk,
        )
        pipeline_dKV = cutlass.pipeline.PipelineUmmaAsync.create(
            num_stages=2,
            producer_group=pipeline_producer_group_MMA_AsyncThread,
            consumer_group=pipeline_consumer_group_MMA_AsyncThread,
            barrier_storage=storage.dKV_mbar_ptr.data_ptr(),
            cta_layout_vmnk=cluster_layout_vmnk,
        )
        pipeline_consumer_group_MMA_AsyncThread_dQ = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread,
            len(self.reduce_warp_ids) * self.cta_group_size,
        )  # Compute
        # dQ accumulator: MMA warp produces, reduce warps consume.
        pipeline_dQ = cutlass.pipeline.PipelineUmmaAsync.create(
            num_stages=1,
            producer_group=pipeline_producer_group_MMA_AsyncThread,
            consumer_group=pipeline_consumer_group_MMA_AsyncThread_dQ,
            barrier_storage=storage.dQ_mbar_ptr.data_ptr(),
            cta_layout_vmnk=cluster_layout_vmnk,
        )
        # AsyncThread producers and UMMA consumers
        # Only 1 thread per warp will signal
        pipeline_PdS_producer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread,
            len(self.compute_warp_ids) * self.cta_group_size,
        )  # Compute
        pipeline_PdS_consumer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, len([self.mma_warp_id])
        )  # MMA
        # dS written by compute warps, consumed by the MMA warp for dK/dQ GEMMs.
        pipeline_dS = cutlass.pipeline.PipelineAsyncUmma.create(
            num_stages=1,
            producer_group=pipeline_PdS_producer_group,
            consumer_group=pipeline_PdS_consumer_group,
            barrier_storage=storage.dS_mbar_ptr.data_ptr(),
            cta_layout_vmnk=cluster_layout_vmnk,
        )
        # TMA producer and UMMA consumers
        pipeline_producer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, len([self.load_warp_id])
        )
        # The arrive count is the number of mcast size
        pipeline_consumer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, len([self.mma_warp_id]) * self.num_mcast_ctas_b
        )
        pipeline_consumer_group_compute = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread,
            len(self.compute_warp_ids) * 1,
        )
        # LSE / dPsum row statistics go straight to the compute warps.
        pipeline_LSE = cutlass.pipeline.PipelineTmaAsync.create(
            barrier_storage=storage.LSE_mbar_ptr.data_ptr(),
            num_stages=self.Q_stage,
            producer_group=pipeline_producer_group,
            consumer_group=pipeline_consumer_group_compute,
            tx_count=self.tma_copy_bytes["LSE"],
            # cta_layout_vmnk=cluster_layout_vmnk,
            defer_sync=True,
        )
        pipeline_dPsum = cutlass.pipeline.PipelineTmaAsync.create(
            barrier_storage=storage.dPsum_mbar_ptr.data_ptr(),
            num_stages=self.dO_stage,
            producer_group=pipeline_producer_group,
            consumer_group=pipeline_consumer_group_compute,
            tx_count=self.tma_copy_bytes["dPsum"],
            # cta_layout_vmnk=cluster_layout_vmnk,
            defer_sync=True,
        )
        pipeline_Q = pipeline.PipelineTmaUmma.create(
            barrier_storage=storage.Q_mbar_ptr.data_ptr(),
            num_stages=self.Q_stage,
            producer_group=pipeline_producer_group,
            consumer_group=pipeline_consumer_group,
            tx_count=self.tma_copy_bytes["Q"],
            cta_layout_vmnk=cluster_layout_vmnk,
            defer_sync=True,
        )
        # 2-CTA mode needs dedicated Qt/Kt pipelines (except hdim 192, where Qt
        # shares the Q pipeline); in 1-CTA mode Qt/Kt alias the Q pipeline.
        if const_expr(self.use_2cta_instrs):
            if const_expr(self.tile_hdim == 192):
                pipeline_Qt = pipeline_Q
            else:
                pipeline_Qt = pipeline.PipelineTmaUmma.create(
                    barrier_storage=storage.Qt_mbar_ptr.data_ptr(),
                    num_stages=self.Q_stage,
                    producer_group=pipeline_producer_group,
                    consumer_group=pipeline_consumer_group,
                    tx_count=self.tma_copy_bytes["Q"],
                    cta_layout_vmnk=cluster_layout_vmnk,
                    defer_sync=True,
                )
            pipeline_Kt = pipeline.PipelineTmaUmma.create(
                barrier_storage=storage.Kt_mbar_ptr.data_ptr(),
                num_stages=self.single_stage,
                producer_group=pipeline_producer_group,
                consumer_group=pipeline_consumer_group,
                tx_count=self.tma_copy_bytes["K"],
                cta_layout_vmnk=cluster_layout_vmnk,
                defer_sync=True,
            )
        else:
            pipeline_Qt = pipeline_Kt = pipeline_Q
        pipeline_dO = pipeline.PipelineTmaUmma.create(
            barrier_storage=storage.dO_mbar_ptr.data_ptr(),
            num_stages=self.dO_stage,
            producer_group=pipeline_producer_group,
            consumer_group=pipeline_consumer_group,
            tx_count=self.tma_copy_bytes["dO"],
            cta_layout_vmnk=cluster_layout_vmnk,
            defer_sync=False,
        )
        # SMEM tensor views.  Several buffers are aliased via recast_ptr or reused
        # outright (sQt over sQ, sKt over sK, sdS over sdSt, sdK/sdV over sQ/sK/sdO)
        # depending on 2-CTA mode and head dim — see the branches below.
        sQ = storage.sQ.get_tensor(sQ_layout.outer, swizzle=sQ_layout.inner, dtype=self.q_dtype)
        if const_expr(self.use_2cta_instrs and self.tile_hdim <= 128):
            sQt = storage.sQt.get_tensor(
                sQt_layout.outer, swizzle=sQt_layout.inner, dtype=self.q_dtype
            )
        else:
            sQt = cute.make_tensor(
                cute.recast_ptr(sQ.iterator, sQt_layout.inner, dtype=self.q_dtype), sQt_layout.outer
            )
        sK = storage.sK.get_tensor(sK_layout.outer, swizzle=sK_layout.inner)
        if const_expr(self.use_2cta_instrs):
            sKt = storage.sKt.get_tensor(sKt_layout.outer, swizzle=sKt_layout.inner)
        else:
            sKt = cute.make_tensor(cute.recast_ptr(sK.iterator, sKt_layout.inner), sKt_layout.outer)
        sV = storage.sV.get_tensor(sV_layout.outer, swizzle=sV_layout.inner)
        sdSt = storage.sdS.get_tensor(sdSt_layout.outer, swizzle=sdSt_layout.inner)
        sdS = cute.make_tensor(cute.recast_ptr(sdSt.iterator, sdS_layout.inner), sdS_layout.outer)
        if const_expr(self.use_2cta_instrs):
            if const_expr(self.tile_hdim <= 128):
                sdS_xchg = storage.sdS_xchg.get_tensor(sdS_xchg_layout)
            else:
                sdS_xchg = storage.sdQaccum.get_tensor(sdS_xchg_layout, dtype=self.ds_dtype)
        else:
            sdS_xchg = None
        sdO = storage.sdO.get_tensor(
            sdO_layout.outer, swizzle=sdO_layout.inner, dtype=self.do_dtype
        )
        if const_expr(self.use_2cta_instrs and self.tile_hdim <= 128):
            sdOt = storage.sdOt.get_tensor(
                sdOt_layout.outer, swizzle=sdOt_layout.inner, dtype=self.do_dtype
            )
        else:
            sdOt = cute.make_tensor(
                cute.recast_ptr(sdO.iterator, sdOt_layout.inner, dtype=self.do_dtype),
                sdOt_layout.outer,
            )
        sLSE = storage.sLSE.get_tensor(sLSE_layout)
        sdPsum = storage.sdPsum.get_tensor(sdPsum_layout)
        if const_expr(self.use_2cta_instrs):
            if const_expr(not self.dKV_postprocess):
                sdV = storage.sV.get_tensor(
                    sdV_layout.outer, swizzle=sdV_layout.inner, dtype=self.dv_dtype
                )
                sdK = storage.sK.get_tensor(
                    sdK_layout.outer, swizzle=sdK_layout.inner, dtype=self.dk_dtype
                )
            else:
                sdV = storage.sV.get_tensor(sdV_layout, dtype=self.dv_dtype)
                sdK = storage.sK.get_tensor(sdK_layout, dtype=self.dk_dtype)
        elif const_expr(not self.dKV_postprocess):
            sdV = storage.sdO.get_tensor(
                sdV_layout.outer, swizzle=sdV_layout.inner, dtype=self.dv_dtype
            )
            sdK = storage.sQ.get_tensor(
                sdK_layout.outer, swizzle=sdK_layout.inner, dtype=self.dk_dtype
            )
        else:
            sdV = storage.sdO.get_tensor(sdV_layout, dtype=self.dv_dtype)
            sdK = storage.sQ.get_tensor(sdK_layout, dtype=self.dk_dtype)
        # Buffer sizing is guaranteed by max(...) in SharedStorage declarations
        # for both sQ (reused as sdK) and sdO (reused as sdV)
        sdQaccum = storage.sdQaccum.get_tensor(sdQaccum_layout)
        # TMEM
        # This is a fake tensor, by right need to retrieve tmem_ptr. But we know that we always
        # request 512 columns of tmem, so we know that it starts at 0.
        tmem_ptr = cute.make_ptr(Float32, 0, mem_space=cute.AddressSpace.tmem, assumed_align=16)
        # S
        thr_mma_S = tiled_mma_S.get_slice(mma_tile_coord_v)
        Sacc_shape = thr_mma_S.partition_shape_C(self.mma_tiler_kq[:2])  # (M, N)
        tStS = thr_mma_S.make_fragment_C(Sacc_shape)
        # (MMA, MMA_M, MMA_N)
        tStS = cute.make_tensor(tmem_ptr + self.tmem_S_offset, tStS.layout)
        # dP
        thr_mma_dP = tiled_mma_dP.get_slice(mma_tile_coord_v)
        dPacc_shape = thr_mma_dP.partition_shape_C(self.mma_tiler_vdo[:2])
        tdPtdP = thr_mma_dP.make_fragment_C(dPacc_shape)
        tdPtdP = cute.make_tensor(tmem_ptr + self.tmem_dP_offset, tdPtdP.layout)
        # dV
        thr_mma_dV = tiled_mma_dV.get_slice(mma_tile_coord_v)
        dvacc_shape = thr_mma_dV.partition_shape_C(self.mma_tiler_pdo[:2])
        tdVtdV = thr_mma_dV.make_fragment_C(dvacc_shape)
        tdVtdV = cute.make_tensor(tmem_ptr + self.tmem_dV_offset, tdVtdV.layout)
        tP = cute.make_tensor(
            cute.recast_ptr(tmem_ptr + self.tmem_P_offset, dtype=self.do_dtype), tP_layout.outer
        )
        # dK
        thr_mma_dK = tiled_mma_dK.get_slice(mma_tile_coord_v)
        dkacc_shape = thr_mma_dK.partition_shape_C(self.mma_tiler_dsq[:2])
        tdKtdK = thr_mma_dK.make_fragment_C(dkacc_shape)
        tdKtdK = cute.make_tensor(tmem_ptr + self.tmem_dK_offset, tdKtdK.layout)
        tdS = cute.make_tensor(
            cute.recast_ptr(tmem_ptr + self.tmem_dS_offset, dtype=self.ds_dtype), tdS_layout.outer
        )
        # dQ
        thr_mma_dQ = tiled_mma_dQ.get_slice(mma_tile_coord_v)
        dQacc_shape = thr_mma_dQ.partition_shape_C(self.mma_tiler_dsk[:2])
        tdQtdQ = thr_mma_dQ.make_fragment_C(dQacc_shape)
        tdQtdQ = cute.make_tensor(tmem_ptr + self.tmem_dQ_offset, tdQtdQ.layout)
        # Helpers shared by all roles: tile/block bookkeeping, per-batch sequence
        # lengths, work scheduling, and masking for causal/local attention.
        block_info = BlockInfo(
            self.tile_m,
            # self.tile_n,
            self.tile_n * self.cluster_shape_mnk[0],  # careful, this case is not very well-tested
            self.is_causal,
            self.is_local,
            False,  # is_split_kv
            window_size_left,
            window_size_right,
            qhead_per_kvhead_packgqa=1,
        )
        SeqlenInfoCls = partial(
            SeqlenInfoQK.create,
            seqlen_q_static=mQ.shape[0],
            seqlen_k_static=mK.shape[0],
            mCuSeqlensQ=mCuSeqlensQ,
            mCuSeqlensK=mCuSeqlensK,
            mSeqUsedQ=mSeqUsedQ,
            mSeqUsedK=mSeqUsedK,
            tile_m=self.tile_m,
            tile_n=self.tile_n * self.cluster_shape_mnk[0],
        )
        TileSchedulerCls = partial(self.tile_scheduler_cls.create, tile_sched_params)
        AttentionMaskCls = partial(
            AttentionMask,
            self.tile_m,
            self.tile_n * self.cta_group_size,
            swap_AB=True,
            window_size_left=window_size_left,
            window_size_right=window_size_right,
        )
        # ---- Warp-specialized dispatch: each warp runs exactly one role below. ----
        # EMPTY
        # (15)
        if warp_idx == self.empty_warp_id:
            cute.arch.setmaxregister_decrease(self.num_regs_empty)
        # RELAY
        # (14)
        if warp_idx == self.relay_warp_id:
            cute.arch.setmaxregister_decrease(
                self.num_regs_mma if self.use_2cta_instrs else self.num_regs_empty
            )
            if const_expr(self.use_2cta_instrs):
                self.relay(
                    dS_cluster_full_mbar_ptr,
                    dS_cluster_empty_mbar_ptr,
                    dS_cluster_leader_mbar_ptr,
                    cluster_layout_vmnk,
                    block_info,
                    SeqlenInfoCls,
                    TileSchedulerCls,
                )
        # LOAD
        # (13)
        if warp_idx == self.load_warp_id:
            cute.arch.setmaxregister_decrease(self.num_regs_load)
            self.load(
                thr_mma_S,
                thr_mma_dP,
                thr_mma_dV,
                thr_mma_dK,
                thr_mma_dQ,
                mQ,
                mK,
                mKt,
                mV,
                mdO,
                mQt,
                mdOt,
                mLSE,
                mdPsum,
                sQ,
                sK,
                sKt,
                sV,
                sdO,
                sQt,
                sdOt,
                sLSE,
                sdPsum,
                tma_atom_Q,
                tma_atom_K,
                tma_atom_Kt,
                tma_atom_V,
                tma_atom_dO,
                tma_atom_Qt,
                tma_atom_dOt,
                pipeline_Q,
                pipeline_Qt,
                pipeline_Kt,
                pipeline_dO,
                pipeline_LSE,
                pipeline_dPsum,
                cluster_layout_vmnk,
                block_info,
                SeqlenInfoCls,
                TileSchedulerCls,
                blocksparse_tensors,
                should_load_Q=True,
                should_load_dO=True,
            )
        # MMA
        # (12)
        if warp_idx == self.mma_warp_id:
            cute.arch.setmaxregister_decrease(self.num_regs_mma)
            # Alloc tmem buffer
            tmem.allocate(self.tmem_alloc_cols)
            tmem.wait_for_alloc()
            tmem_ptr = tmem.retrieve_ptr(Float32)
            self.mma(
                tiled_mma_S,
                tiled_mma_dP,
                tiled_mma_dV,
                tiled_mma_dK,
                tiled_mma_dQ,
                sQ,
                sQt,
                sK,
                sKt,
                sV,
                sdO,
                sdOt,
                tP,
                sdSt,
                sdS,
                tdS,
                tStS,
                tdPtdP,
                tdVtdV,
                tdKtdK,
                tdQtdQ,
                dS_cluster_full_mbar_ptr,
                dS_cluster_empty_mbar_ptr,
                dS_cluster_leader_mbar_ptr,
                pipeline_Q,
                pipeline_Qt,
                pipeline_Kt,
                pipeline_dO,
                pipeline_S_P,
                pipeline_dS,
                pipeline_dKV,
                pipeline_dP,
                pipeline_dQ,
                block_info,
                SeqlenInfoCls,
                TileSchedulerCls,
                is_leader_cta,
                blocksparse_tensors,
            )
            # Dealloc the tensor memory buffer
            tmem.relinquish_alloc_permit()
            tmem.free(tmem_ptr)
        # Compute
        # (4, 5, 6, 7, 8, 9, 10, 11) --> 8 warps
        if warp_idx >= self.compute_warp_ids[0] and warp_idx <= self.compute_warp_ids[-1]:
            cute.arch.setmaxregister_increase(self.num_regs_compute)  # 8 warps
            tmem.wait_for_alloc()
            tmem_ptr = tmem.retrieve_ptr(Float32)
            self.compute_loop(
                thr_mma_S,
                thr_mma_dP,
                thr_mma_dV,
                thr_mma_dK,
                tStS,
                tdPtdP,
                tdVtdV,
                tdKtdK,
                sLSE,
                sdPsum,
                mdV,
                mdK,
                sdS,
                sdS_xchg,
                pipeline_LSE,
                pipeline_dPsum,
                pipeline_S_P,
                pipeline_dS,
                pipeline_dKV,
                pipeline_dP,
                dS_cluster_empty_mbar_ptr,
                dS_cluster_full_mbar_ptr,
                dQaccum_empty_mbar_ptr,
                softmax_scale,
                softmax_scale_log2,
                block_info,
                SeqlenInfoCls,
                AttentionMaskCls,
                TileSchedulerCls,
                sdV,
                sdK,
                mdV_tma_tensor,
                mdK_tma_tensor,
                tma_atom_dV,
                tma_atom_dK,
                tiled_copy_r2s_dKV,
                mdK_semaphore,
                mdV_semaphore,
                aux_tensors,
                fastdiv_mods,
                blocksparse_tensors,
            )
        # Reduce
        # (0, 1, 2, 3) - dQ
        if warp_idx >= self.reduce_warp_ids[0] and warp_idx <= self.reduce_warp_ids[-1]:
            cute.arch.setmaxregister_increase(self.num_regs_reduce)
            tmem.wait_for_alloc()
            tmem_ptr = tmem.retrieve_ptr(Float32)
            self.dQacc_reduce(
                mdQaccum,
                sdQaccum,
                thr_mma_dQ,
                tdQtdQ,
                pipeline_dQ,
                dQaccum_empty_mbar_ptr,
                block_info,
                SeqlenInfoCls,
                TileSchedulerCls,
                mdQ_semaphore,
                blocksparse_tensors,
            )
        return
    @cute.jit
    def relay(
        self,
        dS_cluster_full_mbar_ptr: cute.Pointer,
        dS_cluster_empty_mbar_ptr: cute.Pointer,
        dS_cluster_leader_mbar_ptr: cute.Pointer,
        cluster_layout_vmnk: cute.Layout,
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
    ):
        """Relay-warp role for 2-CTA mode.

        Mirrors the tile-scheduler loop of the other roles and, for every
        m-block iteration, waits for the peer CTA's dS exchange to complete
        (``dS_cluster_full_mbar_ptr``) before arriving on the MMA leader's
        barrier so the leader can proceed.  ``dS_cluster_empty_mbar_ptr`` and
        ``cluster_layout_vmnk`` are part of the shared role signature but are
        not used in this loop.
        """
        # NOTE(review): cta_rank_in_cluster and head_idx_kv below are computed
        # but never read in this function — presumably kept for signature/shape
        # parity with the other roles; confirm before cleaning up.
        cta_rank_in_cluster = cute.arch.make_warp_uniform(cute.arch.block_idx_in_cluster())
        dS_cluster_phase = Int32(0)
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            n_block, head_idx, batch_idx, _ = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            m_block_min, m_block_max = block_info.get_m_block_min_max(
                seqlen, n_block // self.cluster_shape_mnk[0]
            )
            head_idx_kv = head_idx // self.qhead_per_kvhead
            # Skip fully-masked tiles; non-local, non-varlen runs always process.
            process_tile = (
                const_expr(not self.is_local and not self.is_varlen_q) or m_block_min < m_block_max
            )
            if process_tile:
                num_iters = m_block_max - m_block_min
                for _ in cutlass.range(num_iters, unroll=1):
                    # Wait for dS_xchg from peer CTA
                    cute.arch.mbarrier_wait(dS_cluster_full_mbar_ptr, phase=dS_cluster_phase)
                    # Arrive on MMA leader warp
                    with cute.arch.elect_one():
                        cute.arch.mbarrier_arrive(dS_cluster_leader_mbar_ptr, Int32(0))
                    # Phase flips every iteration since the barrier is single-stage.
                    dS_cluster_phase ^= 1
            tile_scheduler.prefetch_next_work()
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
    @cute.jit
    def load(
        self,
        thr_mma_S: cute.core.ThrMma,
        thr_mma_dP: cute.core.ThrMma,
        thr_mma_dV: cute.core.ThrMma,
        thr_mma_dK: cute.core.ThrMma,
        thr_mma_dQ: cute.core.ThrMma,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mKt: Optional[cute.Tensor],
        mV: cute.Tensor,
        mdO: cute.Tensor,
        mQt: Optional[cute.Tensor],
        mdOt: Optional[cute.Tensor],
        mLSE: cute.Tensor,
        mdPsum: cute.Tensor,
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sKt: cute.Tensor,
        sV: cute.Tensor,
        sdO: cute.Tensor,
        sQt: cute.Tensor,
        sdOt: cute.Tensor,
        sLSE: cute.Tensor,
        sdPsum: cute.Tensor,
        tma_atom_Q: cute.CopyAtom,
        tma_atom_K: cute.CopyAtom,
        tma_atom_Kt: Optional[cute.CopyAtom],
        tma_atom_V: cute.CopyAtom,
        tma_atom_dO: cute.CopyAtom,
        tma_atom_Qt: Optional[cute.CopyAtom],
        tma_atom_dOt: Optional[cute.CopyAtom],  # 2-CTA only
        pipeline_Q: PipelineAsync,
        pipeline_Qt: PipelineAsync,
        pipeline_Kt: PipelineAsync,
        pipeline_dO: PipelineAsync,
        pipeline_LSE: PipelineAsync,
        pipeline_dPsum: PipelineAsync,
        cluster_layout_vmnk: cute.Layout,
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
        should_load_Q: bool = True,
        should_load_dO: bool = True,
    ):
        """TMA producer (load-warp) role.

        For each scheduled KV tile: issue the one-time K/V (and Kt in 2-CTA
        mode) loads, then stream Q, LSE, dO and dPsum (plus Qt/dOt in 2-CTA
        mode) for every m-block through their pipelines, acquiring and
        committing producer stages in the exact order the MMA/compute
        consumers expect.  The K and V copies piggy-back on the first Q / dO
        stage via ``extra_tx_count``.
        """
        # One producer state per pipeline "lane".  Q+LSE and dO+dPsum share a
        # state in the generic path; the 2-CTA hdim-192 path uses the *_Qt /
        # *_O_Ot / _LSE / _dPsum states instead.
        producer_state_Q_LSE = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.Q_stage
        )
        producer_state_Qt = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.Q_stage
        )
        producer_state_Kt = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.single_stage
        )
        producer_state_dO_dPsum = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.dO_stage
        )
        producer_state_Q_Qt = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.Q_stage
        )
        producer_state_O_Ot = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.dO_stage
        )
        producer_state_LSE = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.Q_stage
        )
        producer_state_dPsum = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Producer, self.dO_stage
        )
        # Compute multicast mask for Q & dO buffer full
        cta_rank_in_cluster = cute.arch.make_warp_uniform(cute.arch.block_idx_in_cluster())
        block_in_cluster_coord_vmnk = cluster_layout_vmnk.get_flat_coord(cta_rank_in_cluster)
        q_do_mcast_mask = None
        if const_expr(self.is_q_do_mcast):
            q_do_mcast_mask = cpasync.create_tma_multicast_mask(
                cluster_layout_vmnk, block_in_cluster_coord_vmnk, mcast_mode=1
            )
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            n_block, head_idx, batch_idx, _ = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            m_block_min, m_block_max = block_info.get_m_block_min_max(
                seqlen, n_block // self.cluster_shape_mnk[0]
            )
            head_idx_kv = head_idx // self.qhead_per_kvhead
            n_block_cta_group = n_block // self.cta_group_size
            # GMEM tensors (varlen-aware)
            mQ_cur = seqlen.offset_batch_Q(mQ, batch_idx, dim=3)[None, None, head_idx]
            mK_cur = seqlen.offset_batch_K(mK, batch_idx, dim=3)[None, None, head_idx_kv]
            mV_cur = seqlen.offset_batch_K(mV, batch_idx, dim=3)[None, None, head_idx_kv]
            if const_expr(not seqlen.has_cu_seqlens_q):
                mdO_cur = mdO[None, None, head_idx, batch_idx]
            else:
                mdO_cur = cute.domain_offset((0, seqlen.offset_q), mdO[None, None, head_idx])
            mLSE_cur = seqlen.offset_batch_Q(mLSE, batch_idx, dim=2, padded=True)[None, head_idx]
            mdPsum_cur = seqlen.offset_batch_Q(mdPsum, batch_idx, dim=2, padded=True)[
                None, head_idx
            ]
            if const_expr(self.use_2cta_instrs):
                if const_expr(not seqlen.has_cu_seqlens_q):
                    mQt_cur = mQt[None, None, head_idx, batch_idx]
                    mdOt_cur = mdOt[None, None, head_idx, batch_idx]
                else:
                    mQt_cur = cute.domain_offset((0, seqlen.offset_q, 0), mQt)[None, None, head_idx]
                    mdOt_cur = cute.domain_offset((seqlen.offset_q, 0, 0), mdOt)[
                        None, None, head_idx
                    ]
                if const_expr(not seqlen.has_cu_seqlens_k):
                    mKt_cur = mKt[None, None, head_idx_kv, batch_idx]
                else:
                    mKt_cur = cute.domain_offset((0, seqlen.offset_k, 0), mKt)[
                        None, None, head_idx_kv
                    ]
            # (1) S.T = K @ Q.T
            gK = cute.local_tile(
                mK_cur, cute.select(self.mma_tiler_kq, mode=[0, 2]), (n_block_cta_group, 0)
            )
            tSgK = thr_mma_S.partition_A(gK)
            gQ = cute.local_tile(mQ_cur, cute.select(self.mma_tiler_kq, mode=[1, 2]), (None, 0))
            tSgQ = thr_mma_S.partition_B(gQ)
            gLSE = cute.local_tile(mLSE_cur, (self.tile_m,), (None,))
            gdPsum = cute.local_tile(mdPsum_cur, (self.tile_m,), (None,))
            # NOTE(review): this gdO/tdPgdO pair (partitioned via thr_mma_dV) looks
            # dead — tdPgdO is reassigned from gdOt below before any use, and
            # gdO/tdVgdO are recomputed at step (3).  Confirm before removing.
            gdO = cute.local_tile(mdO_cur, cute.select(self.mma_tiler_pdo, mode=[1, 2]), (0, None))
            tdPgdO = thr_mma_dV.partition_B(gdO)
            a_cta_layout = cute.make_layout(cute.slice_(cluster_layout_vmnk, (0, 0, None, 0)).shape)
            load_K, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_K,
                block_in_cluster_coord_vmnk[2],
                a_cta_layout,
                tSgK,
                sK,
                single_stage=True,
            )
            b_cta_layout = cute.make_layout(cute.slice_(cluster_layout_vmnk, (0, None, 0, 0)).shape)
            load_Q, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_Q,
                cta_coord=block_in_cluster_coord_vmnk[1],
                cta_layout=b_cta_layout,
                src_tensor=tSgQ,
                dst_tensor=sQ,
                mcast_mask=q_do_mcast_mask,
            )
            load_Q = copy_utils.tma_producer_copy_fn(load_Q, pipeline_Q)
            # (2) dP = V @ dO.T
            gV = cute.local_tile(
                mV_cur, cute.select(self.mma_tiler_vdo, mode=[0, 2]), (n_block_cta_group, 0)
            )
            tdPgV = thr_mma_dP.partition_A(gV)
            load_V, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_V,
                0,
                cute.make_layout(1),
                tdPgV,
                sV,
                single_stage=True,
            )
            if const_expr(tma_atom_dOt is not None):
                gdOt = cute.local_tile(
                    mdOt_cur, cute.select(self.mma_tiler_vdo, mode=[1, 2]), (None, 0)
                )
                tdPgdO = thr_mma_dP.partition_B(gdOt)
                load_dOt, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_dOt,
                    cta_coord=block_in_cluster_coord_vmnk[1],
                    cta_layout=b_cta_layout,
                    src_tensor=tdPgdO,
                    dst_tensor=sdOt,
                    mcast_mask=q_do_mcast_mask,
                )
                load_dOt = copy_utils.tma_producer_copy_fn(load_dOt, pipeline_dO)
            # (3) dV += P.T @ dO
            gdO = cute.local_tile(mdO_cur, cute.select(self.mma_tiler_pdo, mode=[1, 2]), (0, None))
            tdVgdO = thr_mma_dV.partition_B(gdO)
            load_dO, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_dO,
                cta_coord=block_in_cluster_coord_vmnk[1],
                cta_layout=b_cta_layout,
                src_tensor=tdVgdO,
                dst_tensor=sdO,
                mcast_mask=q_do_mcast_mask,
            )
            load_dO = copy_utils.tma_producer_copy_fn(load_dO, pipeline_dO)
            # (4) dK += dS.T @ Q (2-CTA: needs separate Qt load)
            if const_expr(tma_atom_Qt is not None):
                gQt = cute.local_tile(
                    mQt_cur, cute.select(self.mma_tiler_dsq, mode=[1, 2]), (0, None)
                )
                tdKgQt = thr_mma_dK.partition_B(gQt)
                load_Qt, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_Qt,
                    cta_coord=block_in_cluster_coord_vmnk[1],
                    cta_layout=b_cta_layout,
                    src_tensor=tdKgQt,
                    dst_tensor=sQt,
                    mcast_mask=q_do_mcast_mask,
                )
                load_Qt = copy_utils.tma_producer_copy_fn(load_Qt, pipeline_Qt)
            # (5) dQ = dS @ K
            if const_expr(self.use_2cta_instrs):
                gKt = cute.local_tile(
                    mKt_cur, cute.select(self.mma_tiler_dsk, mode=[1, 2]), (0, n_block_cta_group)
                )
                tdQgK = thr_mma_dQ.partition_B(gKt)
                load_Kt, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_Kt,
                    block_in_cluster_coord_vmnk[1],
                    b_cta_layout,
                    tdQgK,
                    sKt,
                    single_stage=True,
                )
            # Bulk G2S copy used for the LSE / dPsum row statistics.
            copy_atom_stats = cute.make_copy_atom(cpasync.CopyBulkG2SOp(), Float32)
            copy_stats = partial(cute.copy, copy_atom_stats)
            # copy_atom_stats = cute.make_copy_atom(cpasync.CopyBulkG2SMulticastOp(), Float32)
            # sLSE = cute.logical_divide(sLSE, (64,))[(None, block_in_cluster_coord_vmnk[1]), None]
            # gLSE = cute.logical_divide(gLSE, (64,))[(None, block_in_cluster_coord_vmnk[1]), None]
            # sdPsum = cute.logical_divide(sdPsum, (64,))[(None, block_in_cluster_coord_vmnk[1]), None]
            # gdPsum = cute.logical_divide(gdPsum, (64,))[(None, block_in_cluster_coord_vmnk[1]), None]
            # copy_stats = partial(cute.copy, copy_atom_stats, mcast_mask=q_do_mcast_mask)
            # some tiles might be empty due to block sparsity
            if const_expr(self.use_block_sparsity):
                total_m_block_cnt = get_total_q_block_count_bwd(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    n_block,
                    subtile_factor=self.subtile_factor,
                    m_block_max=m_block_max,
                )
                process_tile = total_m_block_cnt > Int32(0)
            else:
                process_tile = (
                    const_expr(not self.is_local and not self.is_varlen_q)
                    or m_block_min < m_block_max
                )
            if process_tile:
                if const_expr(self.use_block_sparsity):
                    # Block-sparse path: delegate the whole produce loop to the helper,
                    # which returns the advanced pipeline states.
                    producer_state_Q_LSE, producer_state_dO_dPsum = (
                        produce_block_sparse_q_loads_bwd_sm100(
                            blocksparse_tensors,
                            batch_idx,
                            head_idx,
                            n_block,
                            producer_state_Q_LSE,
                            producer_state_dO_dPsum,
                            pipeline_Q,
                            pipeline_LSE,
                            pipeline_dO,
                            pipeline_dPsum,
                            load_K,
                            load_V,
                            load_Q,
                            load_dO,
                            copy_stats,
                            gLSE,
                            sLSE,
                            gdPsum,
                            sdPsum,
                            self.tma_copy_bytes["K"],
                            self.tma_copy_bytes["V"],
                            should_load_Q=should_load_Q,
                            should_load_dO=should_load_dO,
                            subtile_factor=self.subtile_factor,
                            m_block_max=m_block_max,
                        )
                    )
                else:
                    first_m_block = m_block_min
                    if const_expr(self.use_2cta_instrs and self.tile_hdim == 192):
                        #### Prologue ####
                        assert should_load_Q and should_load_dO
                        # K & Q (for S)
                        pipeline_Q.producer_acquire(
                            producer_state_Q_Qt,
                            extra_tx_count=self.tma_copy_bytes["K"],
                        )
                        load_K(tma_bar_ptr=pipeline_Q.producer_get_barrier(producer_state_Q_Qt))
                        load_Q(first_m_block, producer_state=producer_state_Q_Qt)
                        pipeline_Q.producer_commit(producer_state_Q_Qt)
                        producer_state_Q_Qt.advance()
                        # LSE
                        pipeline_LSE.producer_acquire(producer_state_LSE)
                        with cute.arch.elect_one():
                            copy_stats(
                                gLSE[None, first_m_block],
                                sLSE[None, producer_state_LSE.index],
                                mbar_ptr=pipeline_LSE.producer_get_barrier(producer_state_LSE),
                            )
                        producer_state_LSE.advance()
                        # dOt + V, for dP.T = V @ dO.T
                        pipeline_dO.producer_acquire(
                            producer_state_O_Ot,
                            extra_tx_count=self.tma_copy_bytes["V"],
                        )
                        load_V(tma_bar_ptr=pipeline_dO.producer_get_barrier(producer_state_O_Ot))
                        load_dOt(first_m_block, producer_state=producer_state_O_Ot)
                        pipeline_dO.producer_commit(producer_state_O_Ot)
                        producer_state_O_Ot.advance()
                        # dPsum
                        pipeline_dPsum.producer_acquire(producer_state_dPsum)
                        with cute.arch.elect_one():
                            copy_stats(
                                gdPsum[None, first_m_block],
                                sdPsum[None, producer_state_dPsum.index],
                                mbar_ptr=pipeline_dPsum.producer_get_barrier(producer_state_dPsum),
                            )
                        producer_state_dPsum.advance()
                        # Qt, for dK = dS.T @ Q
                        pipeline_Qt.producer_acquire(
                            producer_state_Q_Qt,
                            extra_tx_count=self.tma_copy_bytes["K"],
                        )
                        load_Qt(first_m_block, producer_state=producer_state_Q_Qt)
                        load_Kt(tma_bar_ptr=pipeline_Qt.producer_get_barrier(producer_state_Q_Qt))
                        pipeline_Qt.producer_commit(producer_state_Q_Qt)
                        producer_state_Q_Qt.advance()
                        # dO, for dV = P.T @ dO
                        pipeline_dO.producer_acquire(producer_state_O_Ot)
                        load_dO(first_m_block, producer_state=producer_state_O_Ot)
                        pipeline_dO.producer_commit(producer_state_O_Ot)
                        producer_state_O_Ot.advance()
                        #### Mainloop ####
                        # 2CTA: [lse | Q | dOt | dPsum | Qt | dO]
                        for m_block in cutlass.range(m_block_min + 1, m_block_max, unroll=1):
                            # LSE
                            pipeline_LSE.producer_acquire(producer_state_LSE)
                            with cute.arch.elect_one():
                                copy_stats(
                                    gLSE[None, m_block],
                                    sLSE[None, producer_state_LSE.index],
                                    mbar_ptr=pipeline_LSE.producer_get_barrier(producer_state_LSE),
                                )
                            producer_state_LSE.advance()
                            # Q
                            pipeline_Q.producer_acquire(producer_state_Q_Qt)
                            load_Q(m_block, producer_state=producer_state_Q_Qt)
                            pipeline_Q.producer_commit(producer_state_Q_Qt)
                            producer_state_Q_Qt.advance()
                            # dPsum
                            pipeline_dPsum.producer_acquire(producer_state_dPsum)
                            with cute.arch.elect_one():
                                copy_stats(
                                    gdPsum[None, m_block],
                                    sdPsum[None, producer_state_dPsum.index],
                                    mbar_ptr=pipeline_dPsum.producer_get_barrier(
                                        producer_state_dPsum
                                    ),
                                )
                            producer_state_dPsum.advance()
                            # dOt, for dP.T = V @ dO.T
                            pipeline_dO.producer_acquire(producer_state_O_Ot)
                            load_dOt(m_block, producer_state=producer_state_O_Ot)
                            pipeline_dO.producer_commit(producer_state_O_Ot)
                            producer_state_O_Ot.advance()
                            # Qt, for dK = dS.T @ Q
                            pipeline_Qt.producer_acquire(producer_state_Q_Qt)
                            load_Qt(m_block, producer_state=producer_state_Q_Qt)
                            pipeline_Qt.producer_commit(producer_state_Q_Qt)
                            producer_state_Q_Qt.advance()
                            # dO, for dV = P.T @ dO
                            pipeline_dO.producer_acquire(producer_state_O_Ot)
                            load_dO(m_block, producer_state=producer_state_O_Ot)
                            pipeline_dO.producer_commit(producer_state_O_Ot)
                            producer_state_O_Ot.advance()
                    else:
                        #### Prologue ####
                        if const_expr(should_load_Q):
                            # K & Q (for S)
                            pipeline_Q.producer_acquire(
                                producer_state_Q_LSE, extra_tx_count=self.tma_copy_bytes["K"]
                            )
                            load_K(
                                tma_bar_ptr=pipeline_Q.producer_get_barrier(producer_state_Q_LSE)
                            )
                            load_Q(first_m_block, producer_state=producer_state_Q_LSE)
                            pipeline_Q.producer_commit(producer_state_Q_LSE)
                            # LSE
                            pipeline_LSE.producer_acquire(producer_state_Q_LSE)
                            with cute.arch.elect_one():
                                copy_stats(
                                    gLSE[None, first_m_block],
                                    sLSE[None, producer_state_Q_LSE.index],
                                    mbar_ptr=pipeline_LSE.producer_get_barrier(
                                        producer_state_Q_LSE
                                    ),
                                )
                            producer_state_Q_LSE.advance()
                        if const_expr(should_load_dO):
                            pipeline_dO.producer_acquire(
                                producer_state_dO_dPsum,
                                extra_tx_count=self.tma_copy_bytes["V"] + self.tma_copy_bytes["dO"]
                                if const_expr(tma_atom_dOt is not None)
                                else self.tma_copy_bytes["V"],
                            )
                            load_V(
                                tma_bar_ptr=pipeline_dO.producer_get_barrier(
                                    producer_state_dO_dPsum
                                )
                            )
                            load_dO(first_m_block, producer_state=producer_state_dO_dPsum)
                            if const_expr(tma_atom_dOt is not None):
                                load_dOt(first_m_block, producer_state=producer_state_dO_dPsum)
                            pipeline_dO.producer_commit(producer_state_dO_dPsum)
                            # dPsum
                            pipeline_dPsum.producer_acquire(producer_state_dO_dPsum)
                            with cute.arch.elect_one():
                                copy_stats(
                                    gdPsum[None, first_m_block],
                                    sdPsum[None, producer_state_dO_dPsum.index],
                                    mbar_ptr=pipeline_dPsum.producer_get_barrier(
                                        producer_state_dO_dPsum
                                    ),
                                )
                            producer_state_dO_dPsum.advance()
                        if const_expr(self.use_2cta_instrs):
                            pipeline_Kt.producer_acquire(producer_state_Kt)
                            load_Kt(tma_bar_ptr=pipeline_Kt.producer_get_barrier(producer_state_Kt))
                            pipeline_Kt.producer_commit(producer_state_Kt)
                            producer_state_Kt.advance()
                        #### Main Loop ####
                        for m_block in cutlass.range(m_block_min + 1, m_block_max, unroll=1):
                            if const_expr(should_load_Q):
                                if const_expr(tma_atom_Qt is not None):
                                    # Qt lags one m_block behind Q (loaded for m_block - 1).
                                    pipeline_Qt.producer_acquire(producer_state_Qt)
                                    load_Qt(m_block - 1, producer_state=producer_state_Qt)
                                    pipeline_Qt.producer_commit(producer_state_Qt)
                                    producer_state_Qt.advance()
                                # Q (for S)
                                pipeline_Q.producer_acquire(producer_state_Q_LSE)
                                load_Q(m_block, producer_state=producer_state_Q_LSE)
                                pipeline_Q.producer_commit(producer_state_Q_LSE)
                                # LSE
                                pipeline_LSE.producer_acquire(producer_state_Q_LSE)
                                with cute.arch.elect_one():
                                    copy_stats(
                                        gLSE[None, m_block],
                                        sLSE[None, producer_state_Q_LSE.index],
                                        mbar_ptr=pipeline_LSE.producer_get_barrier(
                                            producer_state_Q_LSE
                                        ),
                                    )
                                producer_state_Q_LSE.advance()
                            if const_expr(should_load_dO):
                                pipeline_dO.producer_acquire(
                                    producer_state_dO_dPsum,
                                    extra_tx_count=self.tma_copy_bytes["dO"]
                                    if const_expr(tma_atom_dOt is not None)
                                    else 0,
                                )
                                load_dO(m_block, producer_state=producer_state_dO_dPsum)
                                if const_expr(tma_atom_dOt is not None):
                                    load_dOt(m_block, producer_state=producer_state_dO_dPsum)
                                pipeline_dO.producer_commit(producer_state_dO_dPsum)
                                # dPsum
                                pipeline_dPsum.producer_acquire(producer_state_dO_dPsum)
                                with cute.arch.elect_one():
                                    copy_stats(
                                        gdPsum[None, m_block],
                                        sdPsum[None, producer_state_dO_dPsum.index],
                                        mbar_ptr=pipeline_dPsum.producer_get_barrier(
                                            producer_state_dO_dPsum
                                        ),
                                    )
                                producer_state_dO_dPsum.advance()
                        #### Tail ####
                        if const_expr(should_load_Q):
                            if const_expr(tma_atom_Qt is not None):
                                # Flush the lagging Qt load for the final m_block.
                                pipeline_Qt.producer_acquire(producer_state_Qt)
                                load_Qt(m_block_max - 1, producer_state=producer_state_Qt)
                                pipeline_Qt.producer_commit(producer_state_Qt)
                                producer_state_Qt.advance()
        # Drain all producer pipelines so consumers observe the final commits.
        if const_expr(self.use_2cta_instrs and self.tile_hdim == 192):
            pipeline_Q.producer_tail(producer_state_Q_Qt)
            pipeline_LSE.producer_tail(producer_state_LSE)
            pipeline_dO.producer_tail(producer_state_O_Ot)
            pipeline_dPsum.producer_tail(producer_state_dPsum)
        else:
            if const_expr(should_load_Q):
                pipeline_Q.producer_tail(producer_state_Q_LSE.clone())
                pipeline_LSE.producer_tail(producer_state_Q_LSE)
            if const_expr(tma_atom_Qt is not None):
                pipeline_Qt.producer_tail(producer_state_Qt)
            if const_expr(should_load_dO):
                pipeline_dO.producer_tail(producer_state_dO_dPsum.clone())
                pipeline_dPsum.producer_tail(producer_state_dO_dPsum)
        tile_scheduler.prefetch_next_work()
        tile_scheduler.advance_to_next_work()
        work_tile = tile_scheduler.get_current_work()
    @cute.jit
    def mma(
        self,
        tiled_mma_S: cute.TiledMma,
        tiled_mma_dP: cute.TiledMma,
        tiled_mma_dV: cute.TiledMma,
        tiled_mma_dK: cute.TiledMma,
        tiled_mma_dQ: cute.TiledMma,
        sQ: cute.Tensor,
        sQt: cute.Tensor,
        sK: cute.Tensor,
        sKt: cute.Tensor,
        sV: cute.Tensor,
        sdO: cute.Tensor,
        sdOt: cute.Tensor,
        tP: cute.Tensor,
        sdSt: cute.Tensor,
        sdS: cute.Tensor,
        tdS: cute.Tensor,
        tStS: cute.Tensor,
        tdPtdP: cute.Tensor,
        tdVtdV: cute.Tensor,
        tdKtdK: cute.Tensor,
        tdQtdQ: cute.Tensor,
        dS_cluster_full_mbar_ptr: cute.Pointer,
        dS_cluster_empty_mbar_ptr: cute.Pointer,
        dS_cluster_leader_mbar_ptr: cute.Pointer,
        pipeline_Q: PipelineAsync,
        pipeline_Qt: PipelineAsync,
        pipeline_Kt: PipelineAsync,
        pipeline_dO: PipelineAsync,
        pipeline_S_P: PipelineAsync,
        pipeline_dS: PipelineAsync,
        pipeline_dKV: PipelineAsync,
        pipeline_dP: PipelineAsync,
        pipeline_dQ: PipelineAsync,
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
        is_leader_cta: cutlass.Boolean,
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
    ):
        """MMA warp body for the backward pass: issues the five GEMMs per tile.

        Per (n_block, head, batch) work tile this warp issues, in a software
        pipeline:
            1. S.T  = K  @ Q.T     (accumulated into tmem tStS)
            2. dP.T = V  @ dO.T    (tmem tdPtdP; overlaps tmem with S)
            3. dK  += dS.T @ Q     (tmem tdKtdK)
            4. dV  += P.T  @ dO    (tmem tdVtdV)
            5. dQ   = dS @ K       (tmem tdQtdQ)

        Operand fragments come from smem (sQ, sK, sV, sdO, ...) or tmem
        (tP, tdS); results are handed to the compute/epilogue warps through
        the pipeline_* objects. Raw sync_object_full/empty arrives and waits
        are used (rather than producer_acquire/commit) because the S/P, dP
        and dQ accumulators share tmem and a single manually-tracked phase
        bit per group (producer_phase_acc, producer_phase_dQ, ...) covers
        them. Three code paths are specialized at compile time:
        2-CTA + hdim 192, 2-CTA (other hdims), and single-CTA.

        The external behavior is entirely via side effects on tmem and the
        pipelines; nothing is returned.
        """
        # [2025-10-21] For reasons I don't understand, putting these partitioning in the main
        # kernel (before warp specialization) is a lot slower than putting them here.
        # Partition smem / tmem tensors
        # S = K @ Q.T
        tSrK = tiled_mma_S.make_fragment_A(sK)
        tSrQ = tiled_mma_S.make_fragment_B(sQ)
        # dP = V @ dOt.T
        tdPrV = tiled_mma_dP.make_fragment_A(sV)
        tdPrdOt = tiled_mma_dP.make_fragment_B(sdOt)
        # dK = dS.T @ Q
        # For 2-CTA, dS (dK mma) MUST come from TMEM (cannot use SMEM)
        if const_expr(self.use_smem_dS_for_mma_dK and not self.use_2cta_instrs):
            tdKrdS = tiled_mma_dK.make_fragment_A(sdSt)  # From SMEM
        else:
            tdKrdS = tiled_mma_dK.make_fragment_A(tdS)  # From TMEM
        tdKrQ = tiled_mma_dK.make_fragment_B(sQt)
        # dQ = dS @ K
        tdQrdS = tiled_mma_dQ.make_fragment_A(sdS)
        tdQrK = tiled_mma_dQ.make_fragment_B(sKt)
        # dV = P @ dO.T
        tdVrdO = tiled_mma_dV.make_fragment_B(sdO)
        tdVrP = tiled_mma_dV.make_fragment_A(tP)
        # Pre-bind each GEMM into a callable taking only the smem stage index
        # (B_idx) and, where relevant, zero_init.
        # mma_qk_fn = partial(gemm_w_idx, tiled_mma_S, tStS, tSrK, tSrQ, zero_init=True)
        mma_qk_fn = partial(
            gemm_ptx_w_idx,
            tiled_mma_S,
            tStS,
            tSrK,
            tSrQ,
            sA=sK,
            sB=sQ,
            zero_init=True,
            cta_group=self.cta_group_size,
        )
        # mma_dov_fn = partial(gemm_w_idx, tiled_mma_dP, tdPtdP, tdPrV, tdPrdOt, zero_init=True)
        mma_dov_fn = partial(
            gemm_ptx_w_idx,
            tiled_mma_dP,
            tdPtdP,
            tdPrV,
            tdPrdOt,
            sA=sV,
            sB=sdOt,
            zero_init=True,
            cta_group=self.cta_group_size,
        )
        # mma_pdo_fn = partial(gemm_w_idx, tiled_mma_dV, tdVtdV, tdVrP, tdVrdO)
        mma_pdo_fn = partial(
            gemm_ptx_w_idx,
            tiled_mma_dV,
            tdVtdV,
            tdVrP,
            tdVrdO,
            sA=None,
            sB=sdO,
            tA_addr=self.tmem_P_offset,
            cta_group=self.cta_group_size,
        )
        num_unroll_groups = 2 if const_expr(self.use_2cta_instrs) else 1
        mma_dsk_fn = partial(
            gemm_w_idx,
            tiled_mma_dQ,
            tdQtdQ,
            tdQrdS,
            tdQrK,
            zero_init=True,
            num_unroll_groups=num_unroll_groups,
        )
        # mma_dsk_fn = partial(
        #     gemm_ptx_w_idx, tiled_mma_dQ, tdQtdQ, tdQrdS, tdQrK, sA=sdS, sB=sKt, zero_init=True
        # )
        if const_expr(self.use_smem_dS_for_mma_dK and not self.use_2cta_instrs):
            mma_dsq_fn = partial(gemm_w_idx, tiled_mma_dK, tdKtdK, tdKrdS, tdKrQ)
        else:
            # Need to explicitly pass in tA_addr for correctness
            mma_dsq_fn = partial(
                gemm_ptx_w_idx,
                tiled_mma_dK,
                tdKtdK,
                tdKrdS,
                tdKrQ,
                sA=None,
                sB=sQt,
                tA_addr=self.tmem_dS_offset,
                cta_group=self.cta_group_size,
            )
        # Consumer-side pipeline states for the operands this warp reads, and
        # manually-flipped producer phases for the tmem accumulators it fills.
        pipeline_Q_consumer = pipeline_Q.make_consumer()
        consumer_state_Qt = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.Q_stage
        )
        consumer_state_Q = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.Q_stage
        )
        consumer_state_Kt = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.single_stage
        )
        consumer_state_dO = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.dO_stage
        )
        producer_phase_acc = Int32(1)  # For S & P, dP, dQ
        producer_phase_dQ = Int32(1)  # 2-CTA: separate phase for dQ pipeline
        consumer_state_dS = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, 1
        )
        producer_phase_dKV = Int32(1)
        cta_group = pipeline_S_P.cta_group
        cta_rank_in_cluster = cute.arch.make_warp_uniform(cute.arch.block_idx_in_cluster())
        dS_cluster_phase = Int32(0)
        # Persistent-kernel loop: pull work tiles from the scheduler until done.
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            n_block, head_idx, batch_idx, _ = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)  # must be seqlen_k
            m_block_min, m_block_max = block_info.get_m_block_min_max(
                seqlen, n_block // self.cluster_shape_mnk[0]
            )
            if const_expr(self.use_block_sparsity):
                block_iter_count = get_total_q_block_count_bwd(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    n_block,
                    subtile_factor=self.subtile_factor,
                    m_block_max=m_block_max,
                )
                process_tile = block_iter_count > Int32(0)
            else:
                block_iter_count = m_block_max - m_block_min
                process_tile = (
                    const_expr(not self.is_local and not self.is_varlen_q)
                    or m_block_min < m_block_max
                )
            if const_expr(self.use_2cta_instrs and self.tile_hdim == 192):
                if is_leader_cta and process_tile:
                    accumulate_dK = False
                    accumulate_dV = False
                    # -----------------------------------------------------------
                    ###### MAIN LOOP
                    # -----------------------------------------------------------
                    # 1. S.T = K @ Q.T
                    # 2. dP.T = V @ dO.T
                    # 3. dK = dS.T @ Q
                    # 4. dV = P.T @ dO
                    # 5. dQ = dS @ K
                    # NOTE(review): unlike the other two branches below, this path
                    # uses m_block_max - m_block_min rather than block_iter_count —
                    # confirm block sparsity is unsupported for 2-CTA hdim 192.
                    main_loop_iters = m_block_max - m_block_min
                    # empty waits
                    # pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)
                    # pipeline_dP.sync_object_empty.wait(0, producer_phase_acc)
                    for _ in cutlass.range(main_loop_iters, unroll=1):
                        # 1) S.T = K @ Q.T
                        pipeline_Q.consumer_wait(consumer_state_Q)
                        pipeline_dQ.sync_object_empty.wait(
                            0, producer_phase_acc
                        )  # dQ tmem overlaps with S
                        mma_qk_fn(B_idx=consumer_state_Q.index)
                        pipeline_S_P.sync_object_full.arrive(
                            0, pipeline_S_P.producer_mask, cta_group
                        )
                        pipeline_Q.consumer_release(consumer_state_Q)
                        consumer_state_Q.advance()
                        producer_phase_acc ^= 1
                        # 2) dP.T = V @ dO.T
                        pipeline_dO.consumer_wait(consumer_state_dO)
                        pipeline_S_P.sync_object_empty.wait(
                            0, producer_phase_acc
                        )  # dP tmem overlaps with S
                        mma_dov_fn(B_idx=consumer_state_dO.index)
                        pipeline_dP.sync_object_full.arrive(0, pipeline_dP.producer_mask, cta_group)
                        pipeline_dO.consumer_release(consumer_state_dO)
                        consumer_state_dO.advance()
                        # 3) dK = dS.T @ Q
                        pipeline_Q.consumer_wait(consumer_state_Q)
                        pipeline_dP.sync_object_empty.wait(0, producer_phase_acc)  # dP -> dS
                        mma_dsq_fn(B_idx=consumer_state_Q.index, zero_init=not accumulate_dK)
                        pipeline_Q.consumer_release(consumer_state_Q)
                        consumer_state_Q.advance()
                        accumulate_dK = True
                        # 4) dV = P.T @ dO
                        # Note: if dS is written to tmem, P must be written to tmem
                        pipeline_dO.consumer_wait(consumer_state_dO)
                        mma_pdo_fn(B_idx=consumer_state_dO.index, zero_init=not accumulate_dV)
                        pipeline_dO.consumer_release(consumer_state_dO)
                        consumer_state_dO.advance()
                        accumulate_dV = True
                        # 5) dQ = dS @ K
                        pipeline_dS.consumer_wait(consumer_state_dS)
                        cute.arch.mbarrier_wait(dS_cluster_leader_mbar_ptr, phase=dS_cluster_phase)
                        mma_dsk_fn()
                        pipeline_dQ.sync_object_full.arrive(0, pipeline_dQ.producer_mask, cta_group)
                        pipeline_dS.consumer_release(consumer_state_dS)
                        consumer_state_dS.advance()
                        dS_cluster_phase ^= 1
                    # signal to the epilogue that dV is ready
                    pipeline_dKV.sync_object_empty.wait(0, producer_phase_dKV)
                    pipeline_dKV.sync_object_full.arrive(0, pipeline_dKV.producer_mask, cta_group)
                    # signal to the epilogue that dK is ready
                    pipeline_dKV.sync_object_empty.wait(1, producer_phase_dKV)
                    pipeline_dKV.sync_object_full.arrive(1, pipeline_dKV.producer_mask, cta_group)
                    producer_phase_dKV ^= 1
            elif const_expr(self.use_2cta_instrs):
                if is_leader_cta and process_tile:
                    accumulate_dK = False
                    # -----------------------------------------------------------
                    ###### Prologue
                    # -----------------------------------------------------------
                    # 1. S = Q0 @ K.T
                    # 2. dP = V @ dOt.T
                    # 3. dV = P @ dO
                    # 1) S = K @ Q
                    pipeline_Q.consumer_wait(consumer_state_Q)
                    pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)
                    mma_qk_fn(B_idx=consumer_state_Q.index)
                    pipeline_S_P.sync_object_full.arrive(0, pipeline_S_P.producer_mask, cta_group)
                    pipeline_Q.consumer_release(consumer_state_Q)
                    consumer_state_Q.advance()
                    # 2) dP = V @ dOt.T
                    pipeline_dO.consumer_wait(consumer_state_dO)
                    pipeline_dP.sync_object_empty.wait(0, producer_phase_acc)
                    mma_dov_fn(B_idx=consumer_state_dO.index)
                    pipeline_dP.sync_object_full.arrive(0, pipeline_dP.producer_mask, cta_group)
                    # 3) dV = P.T @ dO
                    producer_phase_acc ^= 1
                    pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)
                    mma_pdo_fn(B_idx=consumer_state_dO.index, zero_init=True)
                    pipeline_dO.consumer_release(consumer_state_dO)
                    consumer_state_dO.advance()
                    pipeline_Kt.consumer_wait(consumer_state_Kt)
                    # -----------------------------------------------------------
                    ###### MAIN LOOP
                    # -----------------------------------------------------------
                    # 1. S.T = K @ Q.T
                    # 2. dK = dS.T @ Q
                    # 3. dP.T = V @ dO.T
                    # 4. dQ = dS @ K
                    # 5. dV = P.T @ dO
                    main_loop_iters = (
                        block_iter_count - 1
                        if const_expr(self.use_block_sparsity)
                        else m_block_max - m_block_min - 1
                    )
                    for _ in cutlass.range(main_loop_iters, unroll=1):
                        # (1) S.T = K @ Q.T (next)
                        pipeline_Q.consumer_wait(consumer_state_Q)
                        pipeline_dQ.sync_object_empty.wait(0, producer_phase_dQ)
                        mma_qk_fn(B_idx=consumer_state_Q.index)
                        pipeline_S_P.sync_object_full.arrive(
                            0, pipeline_S_P.producer_mask, cta_group
                        )
                        pipeline_Q.consumer_release(consumer_state_Q)
                        consumer_state_Q.advance()
                        # pipeline_dS.consumer_wait(consumer_state_dS)
                        # (2) dK += dS.T @ Q (cur)
                        pipeline_Qt.consumer_wait(consumer_state_Qt)
                        pipeline_dP.sync_object_empty.wait(0, producer_phase_acc)  # dP -> dS
                        mma_dsq_fn(B_idx=consumer_state_Qt.index, zero_init=not accumulate_dK)
                        accumulate_dK = True
                        pipeline_Qt.consumer_release(consumer_state_Qt)
                        consumer_state_Qt.advance()
                        # (3) dP.T = V @ dO.T (next)
                        pipeline_dO.consumer_wait(consumer_state_dO)
                        mma_dov_fn(B_idx=consumer_state_dO.index)
                        pipeline_dP.sync_object_full.arrive(0, pipeline_dP.producer_mask, cta_group)
                        # (5) dQ = dS @ K (cur)
                        pipeline_dS.consumer_wait(consumer_state_dS)
                        cute.arch.mbarrier_wait(dS_cluster_leader_mbar_ptr, phase=dS_cluster_phase)
                        mma_dsk_fn()
                        pipeline_dQ.sync_object_full.arrive(0, pipeline_dQ.producer_mask, cta_group)
                        pipeline_dS.consumer_release(consumer_state_dS)
                        consumer_state_dS.advance()
                        dS_cluster_phase ^= 1
                        producer_phase_dQ ^= 1
                        # (4) dV += P.T @ dO (next)
                        producer_phase_acc ^= 1
                        pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)  # S -> P
                        mma_pdo_fn(B_idx=consumer_state_dO.index, zero_init=False)
                        pipeline_dO.consumer_release(consumer_state_dO)
                        consumer_state_dO.advance()
                    pipeline_S_P.sync_object_full.arrive(0, pipeline_S_P.producer_mask, cta_group)
                    # signal to the epilogue that dV is ready
                    pipeline_dKV.sync_object_empty.wait(0, producer_phase_dKV)
                    pipeline_dKV.sync_object_full.arrive(0, pipeline_dKV.producer_mask, cta_group)
                    pipeline_dKV.sync_object_empty.wait(1, producer_phase_dKV)
                    # -----------------------------------------------------------
                    # Tail: Remaining dK and dQ
                    # -----------------------------------------------------------
                    # pipeline_dS.consumer_wait(consumer_state_dS)
                    # dK += dS.T @ Q
                    pipeline_Qt.consumer_wait(consumer_state_Qt)
                    pipeline_dP.sync_object_empty.wait(0, producer_phase_acc)  # dP -> dS
                    mma_dsq_fn(B_idx=consumer_state_Qt.index, zero_init=not accumulate_dK)
                    pipeline_Qt.consumer_release(consumer_state_Qt)
                    consumer_state_Qt.advance()
                    # signal to the epilogue that dK is ready
                    pipeline_dKV.sync_object_full.arrive(1, pipeline_dKV.producer_mask, cta_group)
                    producer_phase_dKV ^= 1
                    # dQ = dS @ K
                    pipeline_dS.consumer_wait(consumer_state_dS)
                    cute.arch.mbarrier_wait(dS_cluster_leader_mbar_ptr, phase=dS_cluster_phase)
                    pipeline_dQ.sync_object_empty.wait(0, producer_phase_dQ)
                    mma_dsk_fn()
                    pipeline_dQ.sync_object_full.arrive(0, pipeline_dQ.producer_mask, cta_group)
                    pipeline_dS.consumer_release(consumer_state_dS)
                    pipeline_Kt.consumer_release(consumer_state_Kt)
                    consumer_state_dS.advance()
                    consumer_state_Kt.advance()
                    dS_cluster_phase ^= 1
                    producer_phase_dQ ^= 1
                    producer_phase_acc ^= 1
            else:
                if is_leader_cta and process_tile:
                    accumulate_dK = False
                    # -----------------------------------------------------------
                    ###### Prologue
                    # -----------------------------------------------------------
                    # 1. S = Q0 @ K.T
                    # 2. dP = V @ dOt.T
                    # 3. dV = P @ dO
                    # 1) S = K @ Q
                    handle_Q = pipeline_Q_consumer.wait_and_advance()
                    pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)
                    mma_qk_fn(B_idx=handle_Q.index)
                    pipeline_S_P.sync_object_full.arrive(0, pipeline_S_P.producer_mask, cta_group)
                    # 2) dP = V @ dOt.T
                    pipeline_dO.consumer_wait(consumer_state_dO)
                    pipeline_dP.sync_object_empty.wait(0, producer_phase_acc)
                    pipeline_dQ.sync_object_empty.wait(0, producer_phase_acc)
                    mma_dov_fn(B_idx=consumer_state_dO.index)
                    pipeline_dP.sync_object_full.arrive(0, pipeline_dP.producer_mask, cta_group)
                    producer_phase_acc ^= 1
                    # 3) dV = P.T @ dO
                    pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)
                    mma_pdo_fn(B_idx=consumer_state_dO.index, zero_init=True)
                    pipeline_dO.consumer_release(consumer_state_dO)
                    consumer_state_dO.advance()
                    # -----------------------------------------------------------
                    ###### MAIN LOOP
                    # -----------------------------------------------------------
                    # 1. S = K @ Q.T
                    # 2. dQ = dS @ K
                    # 3. dK = dS.T @ Q
                    # 4. dP = V @ dOt.T
                    # 5. dV = P.T @ dO
                    # For block sparsity, we use block_iter_count; for dense, use m_block range
                    # MMA doesn't need actual m_block indices, just the iteration count
                    main_loop_iters = (
                        block_iter_count - 1
                        if const_expr(self.use_block_sparsity)
                        else m_block_max - m_block_min - 1
                    )
                    handle_Q_next = handle_Q
                    for _ in cutlass.range(main_loop_iters, unroll=1):
                        # (1) S.T = K @ Q.T
                        handle_Q_next = pipeline_Q_consumer.wait_and_advance()
                        mma_qk_fn(B_idx=handle_Q_next.index)
                        pipeline_S_P.sync_object_full.arrive(
                            0, pipeline_S_P.producer_mask, cta_group
                        )
                        # (2) dK += dS.T @ Q
                        pipeline_dS.consumer_wait(consumer_state_dS)
                        mma_dsq_fn(B_idx=handle_Q.index, zero_init=not accumulate_dK)
                        accumulate_dK = True
                        handle_Q.release()
                        # (3) dQ = dS @ K
                        mma_dsk_fn()
                        pipeline_dQ.sync_object_full.arrive(0, pipeline_dQ.producer_mask, cta_group)
                        pipeline_dS.consumer_release(consumer_state_dS)
                        consumer_state_dS.advance()
                        # (4) dP = V @ dO.T
                        pipeline_dO.consumer_wait(consumer_state_dO)
                        pipeline_dQ.sync_object_empty.wait(0, producer_phase_acc)
                        mma_dov_fn(B_idx=consumer_state_dO.index)
                        pipeline_dP.sync_object_full.arrive(0, pipeline_dP.producer_mask, cta_group)
                        # (5) dV += P.T @ dO
                        producer_phase_acc ^= 1
                        pipeline_S_P.sync_object_empty.wait(0, producer_phase_acc)
                        mma_pdo_fn(B_idx=consumer_state_dO.index, zero_init=False)
                        pipeline_dO.consumer_release(consumer_state_dO)
                        consumer_state_dO.advance()
                        handle_Q = handle_Q_next
                    pipeline_S_P.sync_object_full.arrive(0, pipeline_S_P.producer_mask, cta_group)
                    # signal to the epilogue that dV is ready
                    # pipeline_dKV.producer_acquire(producer_state_dKV)
                    pipeline_dKV.sync_object_empty.wait(0, producer_phase_dKV)
                    # pipeline_dKV.producer_commit(producer_state_dKV)
                    pipeline_dKV.sync_object_full.arrive(0, pipeline_dKV.producer_mask, cta_group)
                    # producer_state_dKV.advance()
                    # pipeline_dKV.producer_acquire(producer_state_dKV)
                    pipeline_dKV.sync_object_empty.wait(1, producer_phase_dKV)
                    # -----------------------------------------------------------
                    # Tail: Remaining dK and dQ
                    # -----------------------------------------------------------
                    # 1) dK += dS.T @ Q
                    pipeline_dS.consumer_wait(consumer_state_dS)
                    mma_dsq_fn(B_idx=handle_Q.index, zero_init=not accumulate_dK)
                    # signal to the epilogue that dK is ready
                    pipeline_dKV.sync_object_full.arrive(1, pipeline_dKV.producer_mask, cta_group)
                    producer_phase_dKV ^= 1
                    # 2) dQ = dS @ K
                    mma_dsk_fn()
                    pipeline_dQ.sync_object_full.arrive(0, pipeline_dQ.producer_mask, cta_group)
                    handle_Q.release()
                    pipeline_dS.consumer_release(consumer_state_dS)
                    consumer_state_dS.advance()
                    producer_phase_acc ^= 1
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
        # Currently it hangs if we have this S_P.producer_tail, will need to understand why
        # pipeline_S_P.producer_tail(producer_state_S_P)
        # pipeline_dP.producer_tail(producer_state_dP)
        # pipeline_dKV.producer_tail(producer_state_dKV)
        # pipeline_dQ.producer_tail(producer_state_dQ)
@cute.jit
def split_wg(
self,
t: cute.Tensor,
wg_idx: cutlass.Int32,
num_wg: cutlass.Constexpr[int],
):
reduced_shape = cute.product_each(t.shape)
rank = len(reduced_shape)
if const_expr(reduced_shape[1] > 1):
assert rank >= 2, "Need rank >= 2 for t in split_wg"
t = cute.logical_divide(t, (reduced_shape[0], reduced_shape[1] // num_wg))
coord = (None, (None, wg_idx)) + (None,) * (rank - 2)
else:
assert rank >= 3, "Need rank >= 3 for t in split_wg"
if const_expr(rank == 3):
t = cute.logical_divide(
t, (reduced_shape[0], reduced_shape[1], reduced_shape[2] // num_wg)
)
coord = (
None,
None,
(None, wg_idx),
) + (None,) * (rank - 3)
else:
t = cute.logical_divide(
t,
(
reduced_shape[0],
reduced_shape[1],
reduced_shape[2],
reduced_shape[3] // num_wg,
),
)
coord = (
None,
None,
None,
(None, wg_idx),
) + (None,) * (rank - 4)
return t[coord]
@cute.jit
def apply_score_mod(
self,
tSrS_t2r,
thr_copy_t2r,
thr_mma_S,
batch_idx,
head_idx,
m_block,
n_block,
softmax_scale,
seqlen_info,
aux_tensors=None,
fastdiv_mods=(None, None),
):
"""Apply forward score modification for SM100 backward pass."""
# In bwd, S is computed as K @ Q.T so dimensions are (tile_n, tile_m)
cS = cute.make_identity_tensor((self.tile_n, self.tile_m))
cS = cute.domain_offset((n_block * self.tile_n, m_block * self.tile_m), cS)
tScS = thr_mma_S.partition_C(cS)
tScS_idx = thr_copy_t2r.partition_D(tScS)
apply_score_mod_inner(
tSrS_t2r,
tScS_idx,
self.score_mod,
batch_idx,
head_idx,
softmax_scale,
self.vec_size,
self.qk_acc_dtype,
aux_tensors,
fastdiv_mods,
seqlen_info,
constant_q_idx=None,
qhead_per_kvhead=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
transpose_indices=True,
)
@cute.jit
def apply_score_mod_bwd(
self,
grad_tensor,
score_tensor,
index_tensor,
batch_idx,
head_idx,
softmax_scale,
seqlen_info,
aux_tensors=None,
fastdiv_mods=(None, None),
):
"""Apply backward score modification (joint graph) for SM100."""
apply_score_mod_bwd_inner(
grad_tensor,
score_tensor,
index_tensor,
self.score_mod_bwd,
batch_idx,
head_idx,
softmax_scale,
self.vec_size,
self.qk_acc_dtype,
aux_tensors,
fastdiv_mods,
seqlen_info,
constant_q_idx=None,
qhead_per_kvhead=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
transpose_indices=True,
)
@cute.jit
def compute_loop(
self,
thr_mma_S: cute.core.ThrMma,
thr_mma_dP: cute.core.ThrMma,
thr_mma_dV: cute.core.ThrMma,
thr_mma_dK: cute.core.ThrMma,
tStS: cute.Tensor,
tdPtdP: cute.Tensor,
tdVtdV: cute.Tensor,
tdKtdK: cute.Tensor,
sLSE: cute.Tensor,
sdPsum: cute.Tensor,
mdV: cute.Tensor,
mdK: cute.Tensor,
sdS: cute.Tensor,
sdS_xchg: cute.Tensor,
pipeline_LSE: PipelineAsync,
pipeline_dPsum: PipelineAsync,
pipeline_S_P: PipelineAsync,
pipeline_dS: PipelineAsync,
pipeline_dKV: PipelineAsync,
pipeline_dP: PipelineAsync,
dS_cluster_empty_mbar_ptr: cute.Pointer,
dS_cluster_full_mbar_ptr: cute.Pointer,
dQaccum_empty_mbar_ptr: cute.Pointer,
softmax_scale: cutlass.Float32,
softmax_scale_log2: cutlass.Float32,
block_info: BlockInfo,
SeqlenInfoCls: Callable,
AttentionMaskCls: Callable,
TileSchedulerCls: Callable,
sdV: Optional[cute.Tensor],
sdK: Optional[cute.Tensor],
mdV_tma_tensor: Optional[cute.Tensor],
mdK_tma_tensor: Optional[cute.Tensor],
tma_atom_dV: Optional[cute.CopyAtom],
tma_atom_dK: Optional[cute.CopyAtom],
tiled_copy_r2s_dKV: Optional[cute.TiledCopy],
mdK_semaphore: Optional[cute.Tensor],
mdV_semaphore: Optional[cute.Tensor],
aux_tensors: Optional[list] = None,
fastdiv_mods=(None, None),
blocksparse_tensors: Optional[BlockSparseTensors] = None,
):
sLSE_2D = cute.make_tensor(
sLSE.iterator,
cute.make_layout(
(self.tile_m, self.tile_n, self.Q_stage),
stride=(1, 0, cute.round_up(self.tile_m, 64)),
),
)
sdPsum_2D = cute.make_tensor(
sdPsum.iterator,
cute.make_layout(
(self.tile_m, self.tile_n, self.dO_stage),
stride=(1, 0, cute.round_up(self.tile_m, 64)),
),
)
# if const_expr(self.SdP_swapAB):
if const_expr(True):
sLSE_2D = layout_utils.transpose_view(sLSE_2D)
sdPsum_2D = layout_utils.transpose_view(sdPsum_2D)
# tix: [128...384] 8 warps
warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx()) # 4-11
tidx = cute.arch.thread_idx()[0] % (cute.arch.WARP_SIZE * len(self.compute_warp_ids))
# tidx = cute.arch.thread_idx()[0] - (cute.arch.WARP_SIZE * self.compute_warp_ids[0])
dp_idx = tidx % 128
num_wg = len(self.compute_warp_ids) // 4 # 2
# wg_idx:
# 0: [256...384]
# 1: [128...256]
tileP_f32_like = self.cta_tiler[1] // 32 * self.v_dtype.width
# tStS has shape ((128, 128), 1, 1), tStP has shape ((128, 64), 1, 1)
# tP overlap with tS
tStP = cute.composition(tStS, (cute.make_layout((self.tile_n, tileP_f32_like)), 1, 1))
tStP = cute.make_tensor(tStS.iterator, tStP.layout) # Otherwise the tmem address is wrong
tScS = thr_mma_S.partition_C(cute.make_identity_tensor(self.mma_tiler_kq[:2]))
tScP = cute.composition(tScS, (cute.make_layout((self.tile_n, tileP_f32_like)), 1, 1))
# tdS overlap with tdP
tdPtdS = cute.composition(tdPtdP, (cute.make_layout((self.tile_n, tileP_f32_like)), 1, 1))
tdPcdP = thr_mma_dP.partition_C(cute.make_identity_tensor(self.mma_tiler_vdo[:2]))
tdPcdS = cute.composition(tdPcdP, (cute.make_layout((self.tile_n, tileP_f32_like)), 1, 1))
# 2-CTA assumes: repetiton should always be 32 & 16
tmem_load_atom = cute.make_copy_atom(
tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(32)), Float32
)
tmem_store_atom = cute.make_copy_atom(
tcgen05.copy.St32x32bOp(tcgen05.copy.Repetition(16)), Float32
)
# tmem -> rmem
thr_copy_t2r = copy_utils.make_tmem_copy(tmem_load_atom, num_wg).get_slice(tidx)
tStS_t2r = thr_copy_t2r.partition_S(tStS) # (((32, 32), 1), 2, 1, 1)
tdPtdP_t2r = thr_copy_t2r.partition_S(tdPtdP)
tScS_t2r = thr_copy_t2r.partition_D(tScS) # ((32, 1), 2, 1, 1)
t0ScS_t2r = thr_copy_t2r.get_slice(0).partition_D(tScS) # ((32, 1), 2, 1, 1)
# ((32, 1), 2, 1, 1, STAGE)
tSsLSE = thr_copy_t2r.partition_D(thr_mma_S.partition_C(sLSE_2D))
tSsdPsum = thr_copy_t2r.partition_D(thr_mma_dP.partition_C(sdPsum_2D))
# rmem -> tmem
thr_copy_r2t = copy_utils.make_tmem_copy(tmem_store_atom, num_wg).get_slice(tidx)
tScP_r2t = thr_copy_r2t.partition_S(tScP)
tStP_r2t = thr_copy_r2t.partition_D(tStP)
tdPcdS_r2t = thr_copy_r2t.partition_S(tdPcdS)
tdPtdS_r2t = thr_copy_r2t.partition_D(tdPtdS)
# rmem -> smem
# This part is a bit iffy, we might be making a lot of assumptions here
copy_atom_r2s = sm100_utils_basic.get_smem_store_op(
LayoutEnum.ROW_MAJOR, self.ds_dtype, Float32, thr_copy_t2r
)
thr_copy_r2s = cute.make_tiled_copy_D(copy_atom_r2s, thr_copy_t2r).get_slice(tidx)
# We assume the swizzle (i.e. layout.inner) stays the same
sdS_epi_layout = sm100_utils_basic.make_smem_layout_epi(
self.ds_dtype, LayoutEnum.ROW_MAJOR, (self.tile_n, self.tile_m), 1
)
sdS_layout = cute.slice_(sdS_epi_layout.outer, (None, None, 0)) # ((8,16), (64,2))
# Need to group into 1 mode to be compatible w thr_copy_r2s
sdS_layout = cute.make_layout((sdS_layout.shape,), stride=(sdS_layout.stride,))
sdS_epi = cute.make_tensor(sdS.iterator, sdS_layout)
tRS_sdS = thr_copy_r2s.partition_D(sdS_epi)
if const_expr(self.use_2cta_instrs):
sdS_xchg_epi = cute.make_tensor(
cute.recast_ptr(sdS_xchg.iterator, sdS_epi_layout.inner), sdS_layout
)
tRS_sdS_xchg = thr_copy_r2s.partition_D(sdS_xchg_epi)
cta_rank_in_cluster = cute.arch.make_warp_uniform(cute.arch.block_idx_in_cluster())
dS_cluster_empty_phase = Int32(1)
# 2-CTA: CTA 0 exchanges stage 1 (bottom half), CTA 1 exchanges stage 0 (top half)
exchange_stage = cta_rank_in_cluster ^ 1 if const_expr(self.use_2cta_instrs) else Int32(0)
consumer_state_S_P_dP = pipeline.make_pipeline_state( # Our impl has shortcut for stage==1
cutlass.pipeline.PipelineUserType.Consumer, 1
)
# consumer_phase_S_P_dP = Int32(0)
producer_state_dS = pipeline.make_pipeline_state( # Our impl has shortcut for stage==1
cutlass.pipeline.PipelineUserType.Producer, 1
)
consumer_state_dKV = cutlass.pipeline.make_pipeline_state(
cutlass.pipeline.PipelineUserType.Consumer, 2
)
consumer_state_LSE = cutlass.pipeline.make_pipeline_state(
cutlass.pipeline.PipelineUserType.Consumer, self.Q_stage
)
consumer_state_dPsum = pipeline.make_pipeline_state(
cutlass.pipeline.PipelineUserType.Consumer, self.dO_stage
)
tile_scheduler = TileSchedulerCls()
work_tile = tile_scheduler.initial_work_tile_info()
while work_tile.is_valid_tile:
n_block, head_idx, batch_idx, _ = work_tile.tile_idx
seqlen = SeqlenInfoCls(batch_idx)
m_block_min, m_block_max = block_info.get_m_block_min_max(
seqlen, n_block // self.cluster_shape_mnk[0]
)
mask = AttentionMaskCls(seqlen)
n_block_for_cluster = n_block // self.cta_group_size
# TODO: condition mask_seqlen
mask_fn = partial(
mask.apply_mask_sm100_transposed,
tScS_t2r=tScS_t2r,
t0ScS_t2r=t0ScS_t2r,
n_block=n_block_for_cluster,
mask_seqlen=True,
mask_causal=self.is_causal,
mask_local=self.is_local,
mask_mod=self.mask_mod,
batch_idx=batch_idx,
head_idx=head_idx,
aux_tensors=aux_tensors,
fastdiv_mods=fastdiv_mods,
)
# prefetch_LSE = not self.is_causal
prefetch_LSE = False
# some tiles might be empty due to block sparsity
if const_expr(self.use_block_sparsity):
(
curr_q_cnt,
curr_q_idx,
curr_full_cnt,
curr_full_idx,
loop_count,
) = get_block_sparse_iteration_info_bwd(
blocksparse_tensors,
batch_idx,
head_idx,
n_block,
subtile_factor=self.subtile_factor,
m_block_max=m_block_max,
)
process_tile = loop_count > Int32(0)
else:
process_tile = (
const_expr(not self.is_local and not self.is_varlen_q)
or m_block_min < m_block_max
)
loop_count = m_block_max - m_block_min
# Mainloop
# Block sparsity: iterate over sparse m_block count and derive actual m_block
# from Q_IDX/FULL_Q_IDX tensors. Dense: iterate m_block_min..m_block_max directly.
for iter_idx in cutlass.range(loop_count, unroll=1):
if const_expr(self.use_block_sparsity):
m_block, is_full_block = get_m_block_from_iter_bwd(
iter_idx,
curr_q_cnt,
curr_q_idx,
curr_full_cnt,
curr_full_idx,
subtile_factor=self.subtile_factor,
m_block_max=m_block_max,
)
m_block_oob = m_block >= m_block_max
else:
m_block = m_block_min + iter_idx
m_block_oob = False
is_full_block = False
# Prefetch 1 stage of LSE
pipeline_LSE.consumer_wait(consumer_state_LSE)
tSrLSE_s2r = cute.make_fragment(tScS_t2r[None, 0, 0, 0].shape, Float32)
if const_expr(prefetch_LSE and not self.shuffle_LSE):
cute.autovec_copy(tSsLSE[None, 0, 0, 0, consumer_state_LSE.index], tSrLSE_s2r)
pipeline_S_P.consumer_wait(consumer_state_S_P_dP)
# pipeline_S_P.sync_object_full.wait(0, consumer_phase_S_P_dP)
#### TMEM->RMEM (Load S from TMEM)
tSrS_t2r = cute.make_fragment(tScS_t2r.shape, Float32)
cute.copy(thr_copy_t2r, tStS_t2r, tSrS_t2r)
if const_expr(self.tile_hdim == 192):
# Signal S tmem load completion using pipeline_S_P when hdim 192
# dP is overlapped with S
cute.arch.fence_view_async_tmem_load()
with cute.arch.elect_one():
pipeline_S_P.consumer_release(consumer_state_S_P_dP)
elif const_expr(self.use_2cta_instrs and self.tile_hdim <= 128):
# Signal S tmem load completion using pipeline_dS when 2cta hdim 128
# dQ is overlapped with S
if iter_idx > 0:
cute.arch.fence_view_async_tmem_load()
with cute.arch.elect_one():
pipeline_dS.producer_commit(producer_state_dS)
producer_state_dS.advance()
if const_expr(self.score_mod_bwd is not None):
tSrS_pre = cute.make_fragment_like(tSrS_t2r)
cute.autovec_copy(tSrS_t2r, tSrS_pre)
if const_expr(self.score_mod is not None):
# Apply score_mod FIRST -> matches forward
self.apply_score_mod(
tSrS_t2r,
thr_copy_t2r,
thr_mma_S,
batch_idx,
head_idx,
m_block,
n_block,
softmax_scale,
seqlen,
aux_tensors,
fastdiv_mods,
)
#### APPLY MASK (after score_mod, matching forward pass order)
check_m_boundary = (m_block + 1) * self.tile_m > seqlen.seqlen_q
mask_fn(
tSrS_t2r,
m_block=m_block,
is_full_block=is_full_block,
check_m_boundary=check_m_boundary,
)
num_stages = cute.size(tScS_t2r, mode=[1])
# ---------------------------------------------
#### P = exp(S - LSE)
# ---------------------------------------------
lane_idx = cute.arch.lane_idx()
tSrP_r2t_f32 = cute.make_fragment(tScP_r2t.shape, Float32) # 64
tSrP_r2t = cute.recast_tensor(tSrP_r2t_f32, self.q_dtype)
for stage in cutlass.range_constexpr(num_stages):
tSrS_cur = tSrS_t2r[None, stage, 0, 0]
tSsLSE_cur = tSsLSE[None, stage, 0, 0, consumer_state_LSE.index]
if const_expr(not self.shuffle_LSE):
if const_expr(stage > 0 or not prefetch_LSE):
cute.autovec_copy(tSsLSE_cur, tSrLSE_s2r)
tSrLSE = tSrLSE_s2r
else:
tSrLSE = tSsLSE_cur[lane_idx]
for v in cutlass.range_constexpr(cute.size(tSrS_t2r, mode=[0]) // 2):
if const_expr(not self.shuffle_LSE):
lse_pair = (tSrLSE[2 * v], tSrLSE[2 * v + 1])
else:
lse_pair = (
utils.shuffle_sync(tSrLSE, offset=2 * v),
utils.shuffle_sync(tSrLSE, offset=2 * v + 1),
)
tSrS_cur[2 * v], tSrS_cur[2 * v + 1] = cute.arch.fma_packed_f32x2(
((tSrS_cur[2 * v], tSrS_cur[2 * v + 1])),
(softmax_scale_log2, softmax_scale_log2),
(-lse_pair[0], -lse_pair[1]),
)
tSrS_cur[2 * v] = cute.math.exp2(tSrS_cur[2 * v], fastmath=True)
tSrS_cur[2 * v + 1] = cute.math.exp2(tSrS_cur[2 * v + 1], fastmath=True)
utils.cvt_f16(tSrS_cur, tSrP_r2t[None, stage, 0, 0])
if const_expr(stage == 0):
cute.arch.fence_view_async_tmem_load()
# Without this barrier, we could have 1 warp writing to P in tmem while
# another warp is still reading S from tmem.
self.compute_sync_barrier.arrive_and_wait()
cute.copy(
thr_copy_r2t,
tSrP_r2t_f32[None, stage, None, None],
tStP_r2t[None, stage, None, None],
)
cute.arch.fence_view_async_tmem_store()
cute.arch.fence_view_async_shared()
self.compute_sync_barrier.arrive_and_wait()
if const_expr(not self.tile_hdim == 192):
# Signal tmem store P completion with pipeline_S_P
with cute.arch.elect_one():
pipeline_S_P.consumer_release(consumer_state_S_P_dP)
# pipeline_S_P.sync_object_empty.arrive(0, pipeline_S_P.consumer_mask)
# Normally we'd need syncwarp here since only 1 thread will signal in
# consumer_release, but we already have the self.compute_sync_barrier before this
pipeline_LSE.consumer_release(consumer_state_LSE)
consumer_state_LSE.advance()
# ---------------------------------------------
# dS.T = P.T * (dP.T - D)
# ---------------------------------------------
pipeline_dPsum.consumer_wait(consumer_state_dPsum)
pipeline_dP.consumer_wait(consumer_state_S_P_dP)
# pipeline_dP.sync_object_full.wait(0, consumer_phase_S_P_dP)
### Now delayed to after loop
# consumer_state_S_P_dP.advance()
# consumer_phase_S_P_dP ^= 1
##### dS.T = P.T * (dP.T - Psum)
for stage in cutlass.range_constexpr(num_stages):
tdPrdP_t2r = cute.make_fragment(tScS_t2r[None, 0, None, None].shape, Float32)
cute.copy(thr_copy_t2r, tdPtdP_t2r[None, stage, None, None], tdPrdP_t2r)
cute.arch.fence_view_async_tmem_load()
self.compute_sync_barrier.arrive_and_wait()
tdPrdP_cur = tdPrdP_t2r[None, 0, 0]
tSrS_cur = tSrS_t2r[None, stage, 0, 0]
tSsdPsum_cur = tSsdPsum[None, stage, 0, 0, consumer_state_dPsum.index]
if const_expr(not self.shuffle_dPsum):
tSrdPsum = cute.make_fragment_like(tSsdPsum_cur, Float32)
cute.autovec_copy(tSsdPsum_cur, tSrdPsum)
else:
tSrdPsum = tSsdPsum_cur[lane_idx]
for v in cutlass.range_constexpr(cute.size(tdPrdP_t2r, mode=[0]) // 2):
if const_expr(not self.shuffle_dPsum):
dPsum_pair = (tSrdPsum[2 * v], tSrdPsum[2 * v + 1])
else:
dPsum_pair = (
utils.shuffle_sync(tSrdPsum, offset=2 * v),
utils.shuffle_sync(tSrdPsum, offset=2 * v + 1),
)
tdPrdP_cur[2 * v], tdPrdP_cur[2 * v + 1] = (
quack.activation.sub_packed_f32x2(
(tdPrdP_cur[2 * v], tdPrdP_cur[2 * v + 1]), dPsum_pair
)
)
tdPrdP_cur[2 * v], tdPrdP_cur[2 * v + 1] = cute.arch.mul_packed_f32x2(
(tSrS_cur[2 * v], tSrS_cur[2 * v + 1]),
(tdPrdP_cur[2 * v], tdPrdP_cur[2 * v + 1]),
)
if const_expr(self.score_mod_bwd is not None):
tSrS_pre_cur = tSrS_pre[None, stage, 0, 0]
cS_bwd = cute.make_identity_tensor((self.tile_n, self.tile_m))
cS_bwd = cute.domain_offset(
(n_block * self.tile_n, m_block * self.tile_m), cS_bwd
)
tScS_bwd = thr_mma_S.partition_C(cS_bwd)
tScS_idx_bwd = thr_copy_t2r.partition_D(tScS_bwd)
tScS_idx_cur = tScS_idx_bwd[None, stage, 0, 0]
self.apply_score_mod_bwd(
tdPrdP_cur,
tSrS_pre_cur,
tScS_idx_cur,
batch_idx,
head_idx,
softmax_scale,
seqlen,
aux_tensors,
fastdiv_mods,
)
# Zero out OOB positions (kv_idx >= seqlen_k) after score_mod_bwd
for i in cutlass.range(cute.size(tdPrdP_cur), unroll_full=True):
kv_idx = tScS_idx_cur[i][0]
tdPrdP_cur[i] = 0.0 if kv_idx >= seqlen.seqlen_k else tdPrdP_cur[i]
tdPrdS_cvt = cute.make_fragment_like(tdPrdP_cur, self.ds_dtype)
utils.cvt_f16(tdPrdP_cur, tdPrdS_cvt)
if const_expr(stage == 0):
pipeline_dS.producer_acquire(producer_state_dS)
if const_expr(self.use_2cta_instrs):
tdPrdS_xchg = cute.make_fragment_like(tdPrdS_cvt, self.ds_dtype)
# RMEM->TMEM: always write to TMEM for MMA
if const_expr(not self.use_smem_dS_for_mma_dK or self.use_2cta_instrs):
tdPrdS_r2t_f32 = cute.recast_tensor(tdPrdS_cvt, Float32)
cute.copy(thr_copy_r2t, tdPrdS_r2t_f32, tdPtdS_r2t[None, stage, 0, 0])
# RMEM->SMEM: For 2-CTA, keep exchange stage in registers, write non-exchange to sdS
if const_expr(self.use_2cta_instrs):
if exchange_stage == stage:
cute.autovec_copy(tdPrdS_cvt, tdPrdS_xchg)
else:
cute.autovec_copy(tdPrdS_cvt, tRS_sdS[None, stage])
else:
cute.autovec_copy(tdPrdS_cvt, tRS_sdS[None, stage])
if const_expr(not self.use_smem_dS_for_mma_dK):
cute.arch.fence_view_async_tmem_store()
if const_expr(self.use_2cta_instrs):
# use pipeline_dP to signal tmem store of dS
with cute.arch.elect_one():
pipeline_dP.consumer_release(consumer_state_S_P_dP)
consumer_state_S_P_dP.advance()
# After the loop: copy exchange registers to sdS_xchg buffer
if const_expr(self.use_2cta_instrs):
# when hdim 192, sdQaccum overlapped with sdS_xchg
if const_expr(self.tile_hdim == 192):
cute.arch.mbarrier_wait(
dQaccum_empty_mbar_ptr, phase=producer_state_dS.phase
)
cute.autovec_copy(tdPrdS_xchg, tRS_sdS_xchg[None, 0])
cute.arch.fence_view_async_shared()
self.compute_sync_barrier.arrive_and_wait()
# Normally we'd need syncwarp here since only 1 thread will signal in
# consumer_release, but we already have the self.compute_sync_barrier before this
pipeline_dPsum.consumer_release(consumer_state_dPsum)
consumer_state_dPsum.advance()
# when 2cta hdim 128, pipeline_dS also signals S tmem load completion so is deferred
if const_expr(not (self.use_2cta_instrs and self.tile_hdim == 128)):
with cute.arch.elect_one():
pipeline_dS.producer_commit(producer_state_dS)
producer_state_dS.advance()
# 2-CTA: DSMEM copy from sdS_xchg to peer's sdS buffer
if const_expr(self.use_2cta_instrs):
stage_copy_bytes = const_expr(self.tma_copy_bytes["dS"] // 2)
stage_copy_elems = const_expr(stage_copy_bytes // (self.ds_dtype.width // 8))
if tidx == 0:
peer_cta_rank_in_cluster = cta_rank_in_cluster ^ 1
smem_src_ptr = sdS_xchg.iterator
# Destination is peer's sdS at our CTA's offset (exchange_stage position)
smem_dst_ptr = sdS.iterator + cta_rank_in_cluster * stage_copy_elems
cute.arch.mbarrier_arrive_and_expect_tx(
dS_cluster_full_mbar_ptr,
stage_copy_bytes,
peer_cta_rank_in_cluster=peer_cta_rank_in_cluster,
)
copy_utils.cpasync_bulk_s2cluster(
smem_src_ptr,
smem_dst_ptr,
dS_cluster_full_mbar_ptr,
stage_copy_bytes,
peer_cta_rank_in_cluster=peer_cta_rank_in_cluster,
)
# Final signal for dS smem store completion
if const_expr(self.use_2cta_instrs and self.tile_hdim == 128):
if process_tile:
with cute.arch.elect_one():
pipeline_dS.producer_commit(producer_state_dS)
producer_state_dS.advance()
# Epilogue
# Run epilogue if we processed any m_blocks for this n_block
if process_tile:
if const_expr(not self.use_tma_store):
consumer_state_dKV = self.epilogue_dKV(
dp_idx,
warp_idx,
batch_idx,
head_idx,
n_block,
seqlen,
thr_mma_dV,
thr_mma_dK,
tdVtdV,
tdKtdK,
mdV,
mdK,
pipeline_dKV,
consumer_state_dKV,
softmax_scale,
)
else:
thr_copy_r2s_dKV = tiled_copy_r2s_dKV.get_slice(dp_idx)
#### STORE dV
consumer_state_dKV = self.epilogue_dK_or_dV_tma(
dp_idx,
batch_idx,
head_idx,
n_block,
seqlen,
thr_mma_dV,
tdVtdV,
mdV_tma_tensor,
sdV,
tma_atom_dV,
thr_copy_r2s_dKV,
pipeline_dKV,
consumer_state_dKV,
None, # Don't scale
int(NamedBarrierBwdSm100.EpilogueWG1), # barrier_id
mdV_semaphore,
"V",
)
#### STORE dK
consumer_state_dKV = self.epilogue_dK_or_dV_tma(
dp_idx,
batch_idx,
head_idx,
n_block,
seqlen,
thr_mma_dK,
tdKtdK,
mdK_tma_tensor,
sdK,
tma_atom_dK,
thr_copy_r2s_dKV,
pipeline_dKV,
consumer_state_dKV,
softmax_scale if const_expr(not self.dKV_postprocess) else None,
int(NamedBarrierBwdSm100.EpilogueWG1), # barrier_id
mdK_semaphore,
"K",
)
# Zero dK/dV for empty tiles (local attention or block sparsity)
# When total_m_block_cnt == 0 for block sparsity, no Q tiles contribute to this KV tile
if const_expr(not self.dKV_postprocess):
should_zero_dKV = False
if const_expr(self.is_local or self.is_varlen_q):
should_zero_dKV = m_block_min >= m_block_max
if const_expr(self.use_block_sparsity):
# For block sparsity, zero when no m_blocks contribute to this n_block
if not process_tile:
should_zero_dKV = True
if should_zero_dKV:
# For 2-CTA: use cluster-wide tile size (cta_group_size * tile_n)
cluster_tile_n = self.tile_n * self.cta_group_size
n_block_for_tile = n_block // self.cta_group_size
gmem_tiled_copy_zero_dK = copy_utils.tiled_copy_2d(
self.dk_dtype,
math.gcd(64, self.tile_hdim),
128, # num_threads
)
gmem_tiled_copy_zero_dV = copy_utils.tiled_copy_2d(
self.dv_dtype,
math.gcd(64, self.tile_hdimv),
128, # num_threads
)
gmem_thr_copy_zero_dK = gmem_tiled_copy_zero_dK.get_slice(dp_idx)
gmem_thr_copy_zero_dV = gmem_tiled_copy_zero_dV.get_slice(dp_idx)
mdV_cur = seqlen.offset_batch_K(mdV, batch_idx, dim=3)[None, None, head_idx]
mdK_cur = seqlen.offset_batch_K(mdK, batch_idx, dim=3)[None, None, head_idx]
gdK = cute.local_tile(
mdK_cur, (cluster_tile_n, self.tile_hdim), (n_block_for_tile, 0)
)
gdV = cute.local_tile(
mdV_cur, (cluster_tile_n, self.tile_hdimv), (n_block_for_tile, 0)
)
tdKgdK = gmem_thr_copy_zero_dK.partition_D(gdK)
tdVgdV = gmem_thr_copy_zero_dV.partition_D(gdV)
cdK = cute.make_identity_tensor((cluster_tile_n, self.tile_hdim))
cdV = cute.make_identity_tensor((cluster_tile_n, self.tile_hdimv))
tdKcdK = gmem_thr_copy_zero_dK.partition_D(cdK)
tdVcdV = gmem_thr_copy_zero_dV.partition_D(cdV)
assert cute.size(tdKgdK[None, 0, 0]) == cute.size(tdVgdV[None, 0, 0])
zero = cute.make_fragment_like(tdKgdK[None, 0, 0])
zero.fill(0.0)
if tidx < 128:
for i in cutlass.range_constexpr(tdKgdK.shape[1]):
row_idx = tdKcdK[0, i, 0][0]
if row_idx < seqlen.seqlen_k - cluster_tile_n * n_block_for_tile:
for j in cutlass.range_constexpr(tdKgdK.shape[2]):
cute.copy(gmem_tiled_copy_zero_dK, zero, tdKgdK[None, i, j])
else:
for i in cutlass.range_constexpr(tdVgdV.shape[1]):
row_idx = tdVcdV[0, i, 0][0]
if row_idx < seqlen.seqlen_k - cluster_tile_n * n_block_for_tile:
for j in cutlass.range_constexpr(tdVgdV.shape[2]):
cute.copy(gmem_tiled_copy_zero_dV, zero, tdVgdV[None, i, j])
tile_scheduler.advance_to_next_work()
work_tile = tile_scheduler.get_current_work()
@cute.jit
def dQacc_reduce(
    self,
    mdQaccum: cute.Tensor,
    sdQaccum: cute.Tensor,
    thr_mma_dQ: cute.core.ThrMma,
    tdQtdQ: cute.Tensor,
    pipeline_dQ: PipelineAsync,
    dQaccum_empty_mbar_ptr: Optional[cute.Pointer],
    block_info: BlockInfo,
    SeqlenInfoCls: Callable,
    TileSchedulerCls: Callable,
    mdQ_semaphore: Optional[cute.Tensor],
    blocksparse_tensors: Optional[BlockSparseTensors] = None,
):
    """Reduce per-tile dQ partials from TMEM into the global dQ accumulator.

    For each (n_block, head, batch) work tile from the tile scheduler, and for
    each contributing m_block, this warp group:
      1. waits on ``pipeline_dQ`` for the MMA side to publish a dQ tile in TMEM,
      2. copies it TMEM -> registers -> shared memory (``sdQaccum``), then
      3. has the TMA warp issue a bulk cp.async f32 reduce-add from shared
         memory into ``mdQaccum`` in global memory, one reduce stage at a time.

    When ``self.deterministic`` is set, the per-m_block semaphore
    ``mdQ_semaphore`` serializes the global adds across n_blocks so the
    floating-point accumulation order is fixed. For 2-CTA MMAs each CTA owns
    half of the reduce stages (see ``stage_offset``).
    """
    num_reduce_threads = cute.arch.WARP_SIZE * len(self.reduce_warp_ids)
    tidx = cute.arch.thread_idx()[0] % num_reduce_threads
    warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx() % len(self.reduce_warp_ids))
    # Only warp 0 of the reduce group issues the bulk-async gmem operations.
    is_tma_warp = warp_idx == 0
    cta_rank_in_cluster = cute.arch.make_warp_uniform(cute.arch.block_idx_in_cluster())
    # TMEM -> RMEM
    tmem_load_atom = cute.make_copy_atom(
        tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(self.dQ_reduce_ncol_t2r)), Float32
    )
    thr_copy_t2r = tcgen05.make_tmem_copy(tmem_load_atom, tdQtdQ).get_slice(tidx)
    tdQtdQ_t2r = thr_copy_t2r.partition_S(tdQtdQ)
    tdQcdQ = thr_mma_dQ.partition_C(cute.make_identity_tensor(self.mma_tiler_dsk[:2]))
    tdQrdQ_t2r_shape = thr_copy_t2r.partition_D(tdQcdQ).shape
    # For 2-CTA: reduce_stage = dQaccum_reduce_stage_t2r / cta_group_size
    expected_reduce_stages_t2r = self.dQaccum_reduce_stage_t2r // self.cta_group_size
    assert cute.size(tdQrdQ_t2r_shape, mode=[1]) == expected_reduce_stages_t2r, (
        "dQaccum t2r reduce stage mismatch"
    )
    expected_reduce_stages = self.dQaccum_reduce_stage // self.cta_group_size
    # 2-CTA: CTA 0 -> (M/2, D) (stage 0, 1) & CTA 1 -> (M/2, D) (stage 2, 3)
    stage_offset = (
        expected_reduce_stages * cta_rank_in_cluster if const_expr(self.use_2cta_instrs) else 0
    )
    thr_copy_dQaccum_r2s = copy_utils.tiled_copy_1d(
        self.dqaccum_dtype, num_reduce_threads, num_copy_elems=128 // self.dqaccum_dtype.width
    ).get_slice(tidx)
    tdQsdQ = thr_copy_dQaccum_r2s.partition_D(sdQaccum)
    # The 'read' hint for cp_async_bulk_wait_group is disabled in deterministic mode.
    read_flag = const_expr(not self.deterministic)
    tile_scheduler = TileSchedulerCls()
    work_tile = tile_scheduler.initial_work_tile_info()
    dQ_consumer_state = pipeline.make_pipeline_state(
        cutlass.pipeline.PipelineUserType.Consumer, 1
    )
    dQ_tma_store_producer_state = pipeline.make_pipeline_state(
        pipeline.PipelineUserType.Producer, self.sdQaccum_stage
    )
    while work_tile.is_valid_tile:
        n_block, head_idx, batch_idx, _ = work_tile.tile_idx
        n_block_cta_group = n_block // self.cta_group_size  # for 2cta
        seqlen = SeqlenInfoCls(batch_idx)
        m_block_min, m_block_max = block_info.get_m_block_min_max(seqlen, n_block_cta_group)
        if const_expr(not seqlen.has_cu_seqlens_q):
            mdQaccum_cur = mdQaccum[None, head_idx, batch_idx]
        else:
            # Varlen: offset into the packed accumulator by this batch's start.
            mdQaccum_cur = cute.domain_offset(
                (seqlen.padded_offset_q * self.tile_hdim,), mdQaccum[None, head_idx]
            )
        gdQaccum_ = cute.local_tile(mdQaccum_cur, (self.tile_m * self.tile_hdim,), (None,))
        # (M * K / STAGE, STAGE, _)
        gdQaccum = cute.flat_divide(
            gdQaccum_, (self.tile_m * self.tile_hdim // self.dQaccum_reduce_stage,)
        )
        if const_expr(self.deterministic):
            mdQ_semaphore_cur = mdQ_semaphore[None, None, head_idx, batch_idx]
        # delay_semaphore_release = self.is_causal and not self.tile_hdim == 192
        delay_semaphore_release = not self.tile_hdim == 192
        # some tiles might be empty due to block sparsity
        if const_expr(self.use_block_sparsity):
            (
                curr_q_cnt,
                curr_q_idx,
                curr_full_cnt,
                curr_full_idx,
                loop_count,
            ) = get_block_sparse_iteration_info_bwd(
                blocksparse_tensors,
                batch_idx,
                head_idx,
                n_block,
                subtile_factor=self.subtile_factor,
                m_block_max=m_block_max,
            )
            process_tile = loop_count > Int32(0)
        else:
            process_tile = (
                const_expr(not self.is_local and not self.is_varlen_q)
                or m_block_min < m_block_max
            )
            loop_count = m_block_max - m_block_min
        # dQacc_reduce mainloop
        # Block sparsity: iterate over sparse m_block count and derive actual m_block
        # from Q_IDX/FULL_Q_IDX tensors. Dense: iterate m_block_min..m_block_max directly.
        for iter_idx in cutlass.range(loop_count, unroll=1):
            if const_expr(self.use_block_sparsity):
                m_block, _ = get_m_block_from_iter_bwd(
                    iter_idx,
                    curr_q_cnt,
                    curr_q_idx,
                    curr_full_cnt,
                    curr_full_idx,
                    subtile_factor=self.subtile_factor,
                    m_block_max=m_block_max,
                )
                if m_block_max > 0:
                    # Clamp in case the sparse index tensor points past the valid range.
                    m_block = cutlass.min(m_block, m_block_max - 1)
            else:
                m_block = m_block_min + iter_idx
            pipeline_dQ.consumer_wait(dQ_consumer_state)
            # TMEM -> RMEM
            tdQrdQ_t2r = cute.make_fragment(tdQrdQ_t2r_shape, Float32)
            cute.copy(thr_copy_t2r, tdQtdQ_t2r, tdQrdQ_t2r)
            cute.arch.fence_view_async_tmem_load()
            cute.arch.sync_warp()
            # Release the TMEM buffer back to the MMA producer as early as possible.
            with cute.arch.elect_one():
                pipeline_dQ.consumer_release(dQ_consumer_state)
            dQ_consumer_state.advance()
            gdQaccum_cur = gdQaccum[None, None, m_block]
            tdQrdQ_shape = (
                self.dQ_reduce_ncol,
                self.tile_hdim // self.cta_group_size // self.dQ_reduce_ncol,
            )
            tdQrdQ = cute.make_tensor(tdQrdQ_t2r.iterator, tdQrdQ_shape)
            for stage in cutlass.range_constexpr(cute.size(tdQrdQ, mode=[1])):
                smem_idx = dQ_tma_store_producer_state.index
                tdQsdQ_r2s = tdQsdQ[None, None, smem_idx]
                tdQrdQ_r2s = cute.make_tensor(tdQrdQ[None, stage].iterator, tdQsdQ_r2s.shape)
                cute.copy(thr_copy_dQaccum_r2s, tdQrdQ_r2s, tdQsdQ_r2s)
                # Fence and barrier to make sure shared memory store is visible to TMA store
                cute.arch.fence_view_async_shared()
                # semaphore acquire
                if const_expr(self.deterministic and stage == 0):
                    if const_expr(self.spt):
                        _, n_block_max_for_m_block = block_info.get_n_block_min_max(
                            seqlen, m_block
                        )
                        lock_value = n_block_max_for_m_block - 1 - n_block_cta_group
                    else:
                        lock_value = n_block_cta_group
                    barrier.wait_eq(
                        mdQ_semaphore_cur[(m_block, None)].iterator,
                        tidx,
                        cta_rank_in_cluster,
                        lock_value,
                    )
                self.reduce_sync_barrier.arrive_and_wait()
                # Copy from shared memory to global memory
                if is_tma_warp:
                    with cute.arch.elect_one():
                        copy_utils.cpasync_reduce_bulk_add_f32(
                            sdQaccum[None, smem_idx].iterator,
                            gdQaccum_cur[None, stage + stage_offset].iterator,
                            self.tma_copy_bytes["dQ"] // 1,
                        )
                    cute.arch.cp_async_bulk_commit_group()
                    cute.arch.cp_async_bulk_wait_group(self.sdQaccum_stage - 1, read=read_flag)
                self.reduce_sync_barrier.arrive_and_wait()
                dQ_tma_store_producer_state.advance()
                # Directly add to gmem, much slower
                # tdQgdQ = thr_copy_dQaccum_r2s.partition_D(gdQaccum[None, stage, m_block])
                # assert cute.size(tdQrdQ_r2s) == cute.size(tdQgdQ)
                # for i in cutlass.range(cute.size(tdQrdQ_r2s) // 4, unroll_full=True):
                #     copy_utils.atomic_add_fp32x4(
                #         tdQrdQ_r2s[4 * i],
                #         tdQrdQ_r2s[4 * i + 1],
                #         tdQrdQ_r2s[4 * i + 2],
                #         tdQrdQ_r2s[4 * i + 3],
                #         utils.elem_pointer(tdQgdQ, 4 * i),
                #     )
                # semaphore release for prior m_block
                if const_expr(self.deterministic and stage == 0 and delay_semaphore_release):
                    if m_block > m_block_min:
                        barrier.arrive_inc(
                            mdQ_semaphore_cur[(m_block - 1, None)].iterator,
                            tidx,
                            cta_rank_in_cluster,
                            1,
                        )
            if const_expr(self.tile_hdim == 192):
                # hdim 192 overlaps sdQaccum with sdS_xchg: once our bulk copies have
                # drained, signal the compute side that the buffer is free again.
                if const_expr(self.sdQaccum_stage > 1):
                    if is_tma_warp:
                        cute.arch.cp_async_bulk_wait_group(0, read=read_flag)
                    self.reduce_sync_barrier.arrive_and_wait()
                with cute.arch.elect_one():
                    cute.arch.mbarrier_arrive(dQaccum_empty_mbar_ptr)
            # semaphore release
            # NOTE: arrive_inc calls red_release which issues membar
            if const_expr(self.deterministic and not delay_semaphore_release):
                if const_expr(self.sdQaccum_stage > 1 and not self.tile_hdim == 192):
                    if is_tma_warp:
                        cute.arch.cp_async_bulk_wait_group(0, read=read_flag)
                    self.reduce_sync_barrier.arrive_and_wait()
                barrier.arrive_inc(
                    mdQ_semaphore_cur[m_block, None].iterator, tidx, cta_rank_in_cluster, 1
                )
        if process_tile:
            # Drain all outstanding bulk copies before moving on to the next tile.
            if is_tma_warp:
                cute.arch.cp_async_bulk_wait_group(0, read=read_flag)
            self.reduce_sync_barrier.arrive_and_wait()
            # final semaphore release
            if const_expr(self.deterministic and delay_semaphore_release):
                barrier.arrive_inc(
                    mdQ_semaphore_cur[(m_block_max - 1, None)].iterator,
                    tidx,
                    cta_rank_in_cluster,
                    1,
                )
        if const_expr(
            self.deterministic and not self.spt and block_info.window_size_left is not None
        ):
            # Local attention: m_blocks past m_block_max never run for this n_block,
            # so bump their semaphores anyway to unblock downstream waiters.
            m_block_global_max = cute.ceil_div(seqlen.seqlen_q, self.tile_m)
            for m_block in cutlass.range(m_block_max, m_block_global_max, unroll=1):
                barrier.arrive_inc(
                    mdQ_semaphore_cur[(m_block, None)].iterator, tidx, cta_rank_in_cluster, 1
                )
        tile_scheduler.advance_to_next_work()
        work_tile = tile_scheduler.get_current_work()
@cute.jit
def epilogue_dKV(
    self,
    tidx: Int32,
    warp_idx: Int32,
    batch_idx: Int32,
    head_idx: Int32,
    n_block: Int32,
    seqlen,
    thr_mma_dV: cute.core.ThrMma,
    thr_mma_dK: cute.core.ThrMma,
    tdVtdV: cute.Tensor,
    tdKtdK: cute.Tensor,
    mdV: cute.Tensor,
    mdK: cute.Tensor,
    pipeline_dKV: PipelineAsync,
    consumer_state_dKV: cutlass.pipeline.PipelineState,
    softmax_scale: Float32,
) -> cutlass.pipeline.PipelineState:
    """Non-TMA epilogue: write dV then dK for one KV tile directly to gmem.

    Consumes two stages of ``pipeline_dKV`` (dV first, then dK). Each stage is
    loaded TMEM -> registers, down-converted to the output dtype (dK is
    additionally multiplied by ``softmax_scale``; dV is not), and stored to
    global memory with a per-thread guard against rows past ``seqlen_k``.
    Used when the TMA store path is disabled; MHA only
    (``qhead_per_kvhead == 1``).

    Returns the updated pipeline consumer state.
    """
    wg_idx = (
        cute.arch.thread_idx()[0] % (cute.arch.WARP_SIZE * len(self.compute_warp_ids))
    ) // 128
    num_wg = cute.arch.WARP_SIZE * len(self.compute_warp_ids) // 128
    assert self.qhead_per_kvhead == 1, "This epilogue path is only for MHA"
    # Per-batch, per-head views of the global dV/dK tensors.
    mdV_cur = seqlen.offset_batch_K(mdV, batch_idx, dim=3)[None, None, head_idx]
    mdK_cur = seqlen.offset_batch_K(mdK, batch_idx, dim=3)[None, None, head_idx]
    tmem_load_atom = cute.make_copy_atom(
        tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(16)), Float32
    )
    # dV
    pipeline_dKV.consumer_wait(consumer_state_dKV)
    tiled_tmem_ld_dV = tcgen05.make_tmem_copy(tmem_load_atom, tdVtdV)
    thr_tmem_ld_dV = tiled_tmem_ld_dV.get_slice(tidx)
    tdVtdV_t2r_p = thr_tmem_ld_dV.partition_S(tdVtdV)
    tdVtdV_t2r = self.split_wg(tdVtdV_t2r_p, wg_idx, num_wg)
    cdV = cute.make_identity_tensor((self.mma_tiler_pdo[0], self.mma_tiler_pdo[1]))
    tdVcdV = thr_mma_dV.partition_C(cdV)
    tdVcdV_tensor = cute.make_tensor(tdVcdV.iterator, tdVcdV.layout)
    tdVcdV_t2r_p = thr_tmem_ld_dV.partition_D(tdVcdV_tensor)
    tdVcdV_t2r = self.split_wg(tdVcdV_t2r_p, wg_idx, num_wg)
    tdVrdV_t2r = cute.make_fragment(tdVcdV_t2r.shape, Float32)
    cute.copy(thr_tmem_ld_dV, tdVtdV_t2r, tdVrdV_t2r)
    cute.arch.fence_view_async_tmem_load()
    universal_copy_bits = 128
    atom_universal_copy = cute.make_copy_atom(
        cute.nvgpu.CopyUniversalOp(),
        self.dv_dtype,
        num_bits_per_copy=universal_copy_bits,
    )
    # Reuse the TMEM-load thread/value layout for the gmem store.
    tiled_gmem_store_dV = cute.make_tiled_copy(
        atom_universal_copy,
        layout_tv=tiled_tmem_ld_dV.layout_dst_tv_tiled,
        tiler_mn=tiled_tmem_ld_dV.tiler_mn,
    )
    tdVrdV_r2s = cute.make_fragment(tdVrdV_t2r.shape, self.dv_dtype)
    # Down-convert the f32 accumulators to the dV output dtype.
    for i in cutlass.range_constexpr(cute.size(tdVrdV_t2r, mode=[1])):
        dV_vec = tdVrdV_t2r[(None, i, 0, 0)].load()
        tdVrdV_r2s[(None, i, 0, 0)].store(dV_vec.to(self.dv_dtype))
    gdV = cute.local_tile(mdV_cur, (self.mma_tiler_pdo[0], self.tile_hdimv), (None, 0))
    gdV_tile = gdV[None, None, n_block // self.cta_group_size]
    tdVgdV = thr_mma_dV.partition_C(gdV_tile)
    tdVgdV_r2g_p = thr_tmem_ld_dV.partition_D(tdVgdV)
    tdVgdV_r2g = self.split_wg(tdVgdV_r2g_p, wg_idx, num_wg)
    # Tail-tile guard: only threads whose row lies within seqlen_k store.
    if tidx < seqlen.seqlen_k - self.tile_n * n_block:
        cute.copy(tiled_gmem_store_dV, tdVrdV_r2s, tdVgdV_r2g)
    cute.arch.sync_warp()
    with cute.arch.elect_one():
        pipeline_dKV.consumer_release(consumer_state_dKV)
    consumer_state_dKV.advance()
    # dK
    pipeline_dKV.consumer_wait(consumer_state_dKV)
    tiled_tmem_ld_dK = tcgen05.make_tmem_copy(tmem_load_atom, tdKtdK)
    thr_tmem_ld_dK = tiled_tmem_ld_dK.get_slice(tidx)
    tdKtdK_t2r_p = thr_tmem_ld_dK.partition_S(tdKtdK)
    tdKtdK_t2r = self.split_wg(tdKtdK_t2r_p, wg_idx, num_wg)
    cdK = cute.make_identity_tensor((self.mma_tiler_dsq[0], self.mma_tiler_dsq[1]))
    tdKcdK = thr_mma_dK.partition_C(cdK)
    tdKcdK_tensor = cute.make_tensor(tdKcdK.iterator, tdKcdK.layout)
    tdKcdK_t2r_p = thr_tmem_ld_dK.partition_D(tdKcdK_tensor)
    tdKcdK_t2r = self.split_wg(tdKcdK_t2r_p, wg_idx, num_wg)
    tdKrdK_t2r = cute.make_fragment(tdKcdK_t2r.shape, Float32)
    cute.copy(tiled_tmem_ld_dK, tdKtdK_t2r, tdKrdK_t2r)
    cute.arch.fence_view_async_tmem_load()
    universal_copy_bits = 128
    atom_universal_copy = cute.make_copy_atom(
        cute.nvgpu.CopyUniversalOp(),
        self.dk_dtype,
        num_bits_per_copy=universal_copy_bits,
    )
    tiled_gmem_store_dK = cute.make_tiled_copy(
        atom_universal_copy,
        layout_tv=tiled_tmem_ld_dK.layout_dst_tv_tiled,
        tiler_mn=tiled_tmem_ld_dK.tiler_mn,
    )
    tdKrdK_r2s = cute.make_fragment(tdKrdK_t2r.shape, self.dk_dtype)
    # dK picks up the softmax scale during down-conversion (dV does not).
    for i in cutlass.range_constexpr(cute.size(tdKrdK_t2r, mode=[1])):
        dK_vec = tdKrdK_t2r[(None, i, 0, 0)].load() * softmax_scale
        tdKrdK_r2s[(None, i, 0, 0)].store(dK_vec.to(self.dk_dtype))
    gdK = cute.local_tile(mdK_cur, (self.mma_tiler_dsq[0], self.tile_hdim), (None, 0))
    gdK_tile = gdK[None, None, n_block // self.cta_group_size]
    tdKgdK = thr_mma_dK.partition_C(gdK_tile)
    tdKgdK_r2g_p = thr_tmem_ld_dK.partition_D(tdKgdK)
    tdKgdK_r2g = self.split_wg(tdKgdK_r2g_p, wg_idx, num_wg)
    if tidx < seqlen.seqlen_k - self.tile_n * n_block:
        cute.copy(tiled_gmem_store_dK, tdKrdK_r2s, tdKgdK_r2g)
    cute.arch.sync_warp()
    with cute.arch.elect_one():
        pipeline_dKV.consumer_release(consumer_state_dKV)
    return consumer_state_dKV
@cute.jit
def epilogue_dK_or_dV_tma(
    self,
    tidx: Int32,
    batch_idx: Int32,
    head_idx: Int32,
    n_block: Int32,
    seqlen,
    thr_mma: cute.core.ThrMma,
    tdKVtdKV: cute.Tensor,
    mdKV: cute.Tensor,
    sdKV: cute.Tensor,
    tma_atom_dKV: cute.CopyAtom,
    thr_copy_r2s_dKV: cute.TiledCopy,
    pipeline_dKV: PipelineAsync,
    consumer_state_dKV: cutlass.pipeline.PipelineState,
    scale: Optional[Float32],
    barrier_id: Int32,
    mdKV_semaphore: Optional[cute.Tensor],
    K_or_V: cutlass.Constexpr[str],
) -> cutlass.pipeline.PipelineState:
    """TMA (or bulk reduce-add) epilogue for one of dK / dV, selected by ``K_or_V``.

    Waits on ``pipeline_dKV`` for the accumulator tile in TMEM, then per
    epilogue stage: loads TMEM -> registers, optionally multiplies by ``scale``,
    converts to the output dtype, stages through shared memory, and has the
    leader warp write to global memory — a TMA store when
    ``self.dKV_postprocess`` is off, or a bulk cp.async f32 reduce-add into the
    accumulator buffer when it is on. With deterministic GQA
    (``qhead_per_kvhead > 1``) a per-tile semaphore serializes the writes of
    the query heads sharing a KV head.

    Returns the advanced pipeline consumer state.
    """
    assert K_or_V in ("K", "V")
    # Select the K- or V-side tile sizes / dtypes once, at trace time.
    tile_hdim = self.tile_hdim if const_expr(K_or_V == "K") else self.tile_hdimv
    dtype = self.dk_dtype if const_expr(K_or_V == "K") else self.dv_dtype
    epi_tile = self.sdK_epi_tile if const_expr(K_or_V == "K") else self.sdV_epi_tile
    flat_epi_tile = (
        self.sdK_flat_epi_tile if const_expr(K_or_V == "K") else self.sdV_flat_epi_tile
    )
    num_compute_threads = cute.arch.WARP_SIZE * len(self.compute_warp_ids)
    wg_idx = (cute.arch.thread_idx()[0] % num_compute_threads) // 128
    num_wg = num_compute_threads // 128
    # First warp of each warp group issues the SMEM -> GMEM copies.
    leader_warp = (cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4) == 0
    cta_group_tile_n = const_expr(self.tile_n * self.cta_group_size)
    if const_expr(not self.dKV_postprocess):
        sdKV = sdKV[None, None, wg_idx]  # (tile_n, 64) for bf16
    else:
        sdKV = sdKV[None, wg_idx]  # (tile_n * 32) for fp32
    # (8, tile_n / 128, 64 / 8) = (8, 1, 8) or (4, tile_n * 32 / (128 * 4)) = (4, 8)
    tdKVsdKV_r2s = thr_copy_r2s_dKV.partition_D(sdKV)
    head_idx_kv = head_idx // self.qhead_per_kvhead
    if const_expr(not self.dKV_postprocess):
        assert not seqlen.has_cu_seqlens_k, "varlen uses non tma store path"
        mdKV_cur = mdKV[None, None, head_idx_kv, batch_idx]  # (seqlen, hdim)
        gdKV_p = cute.local_tile(
            mdKV_cur, (self.tile_n, tile_hdim), (n_block, 0)
        )  # (tile_n, hdim) - per CTA
        gdKV = self.split_wg(gdKV_p, wg_idx, num_wg)  # (tile_n, hdim / 2)
        gdKV_epi = cute.local_tile(
            gdKV, epi_tile, (0, None)
        )  # (tile_n, 64, epi_stage = (hdim / 2) / 64)
    else:
        # n_block_group = n_block // self.cta_group_size
        if const_expr(not seqlen.has_cu_seqlens_k):
            mdKV_cur = mdKV[None, head_idx_kv, batch_idx]  # (seqlen * hdim)
        else:
            # Varlen: offset into the flat accumulator by this batch's start.
            mdKV_cur = cute.domain_offset(
                (seqlen.padded_offset_k * tile_hdim,), mdKV[None, head_idx_kv]
            )
        gdKV_p = cute.local_tile(
            mdKV_cur, (self.tile_n * tile_hdim,), (n_block,)
        )  # (tile_n * hdim)
        gdKV = cute.logical_divide(gdKV_p, (self.tile_n * tile_hdim // num_wg,))[
            ((None, wg_idx),)
        ]  # (tile_n * hdim / 2)
        gdKV_epi = cute.flat_divide(
            gdKV, (flat_epi_tile,)
        )  # (tile_n * hdim / 2 / epi_stage, epi_stage)
    deterministic_KV = self.deterministic and self.qhead_per_kvhead > 1
    if const_expr(deterministic_KV):
        mdKV_semaphore_cur = mdKV_semaphore[n_block, None, head_idx_kv, batch_idx]
    if const_expr(not self.dKV_postprocess):
        tdKVsdKV, tdKVgdKV = cpasync.tma_partition(
            tma_atom_dKV,
            0,  # no multicast
            cute.make_layout(1),
            cute.group_modes(sdKV, 0, 2),
            cute.group_modes(gdKV_epi, 0, 2),
        )  # (TMA) and (TMA, EPI_STAGE)
        assert len(tdKVsdKV.shape) == 1, "Wrong rank for SMEM fragment tdKVsdKV"
        assert len(tdKVgdKV.shape) == 2, "Wrong rank for GMEM fragment tdKVgdKV"
        num_epi_stages = cute.size(tdKVgdKV.shape[1])
        if const_expr(K_or_V == "K"):
            assert num_epi_stages == self.num_epi_stages, "Epi stage calculation is wrong (K)"
        else:
            assert num_epi_stages == self.num_epi_stages_v, "Epi stage calculation is wrong (V)"
    else:
        num_epi_stages = (
            self.num_epi_stages if const_expr(K_or_V == "K") else self.num_epi_stages_v
        )
    tmem_load_atom = cute.make_copy_atom(
        tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(self.dK_reduce_ncol)), Float32
    )
    # The 'read' hint for cp_async_bulk_wait_group is disabled in deterministic mode.
    read_flag = const_expr(not deterministic_KV)
    pipeline_dKV.consumer_wait(consumer_state_dKV)
    # semaphore acquire
    if const_expr(deterministic_KV):
        barrier.wait_eq(
            mdKV_semaphore_cur.iterator, tidx, wg_idx, head_idx % self.qhead_per_kvhead
        )
        cute.arch.barrier(barrier_id=barrier_id + wg_idx, number_of_threads=128)
    for epi_stage in cutlass.range_constexpr(num_epi_stages):
        # TMEM -> RMEM -- setup
        thr_copy_t2r = tcgen05.make_tmem_copy(tmem_load_atom, tdKVtdKV).get_slice(tidx)
        tdKVtdKV_t2r_p = thr_copy_t2r.partition_S(tdKVtdKV)
        tdKVtdKV_t2r = self.split_wg(tdKVtdKV_t2r_p, wg_idx, num_wg)[None, None, 0, 0]
        if const_expr(num_epi_stages > 1):
            tdKVtdKV_t2r = tdKVtdKV_t2r[None, epi_stage]
        cdKV = cute.make_identity_tensor((cta_group_tile_n, tile_hdim))
        tdKVcdKV = thr_mma.partition_C(cdKV)
        tdKVcdKV_t2r_p = thr_copy_t2r.partition_D(tdKVcdKV)
        tdKVcdKV_t2r = self.split_wg(tdKVcdKV_t2r_p, wg_idx, num_wg)[None, None, 0, 0]
        if const_expr(num_epi_stages > 1):
            tdKVcdKV_t2r = tdKVcdKV_t2r[None, epi_stage]
        tdKVrdKV_t2r = cute.make_fragment(tdKVcdKV_t2r.shape, Float32)
        assert cute.size(tdKVrdKV_t2r) == cute.size(tdKVtdKV_t2r) // cute.arch.WARP_SIZE, (
            "RMEM<->TMEM fragment size mismatch"
        )
        # TMEM -> RMEM -- copy and fence
        cute.copy(thr_copy_t2r, tdKVtdKV_t2r, tdKVrdKV_t2r)
        cute.arch.fence_view_async_tmem_load()
        # RMEM -- scale and convert
        if const_expr(scale is not None):
            # Packed f32x2 multiply: two accumulator lanes per instruction.
            for i in cutlass.range(cute.size(tdKVrdKV_t2r.shape) // 2, unroll_full=True):
                tdKVrdKV_t2r[2 * i], tdKVrdKV_t2r[2 * i + 1] = cute.arch.mul_packed_f32x2(
                    (tdKVrdKV_t2r[2 * i], tdKVrdKV_t2r[2 * i + 1]), (scale, scale)
                )
        tdKVrdKV = cute.make_fragment(tdKVrdKV_t2r.shape, dtype)  # (32 columns)
        tdKVrdKV.store(tdKVrdKV_t2r.load().to(dtype))
        # RMEM -> SMEM -- copy, fence and barrier
        tdKVrdKV_r2s = cute.make_tensor(tdKVrdKV.iterator, tdKVsdKV_r2s.shape)
        cute.copy(thr_copy_r2s_dKV, tdKVrdKV_r2s, tdKVsdKV_r2s)
        cute.arch.fence_view_async_shared()
        cute.arch.barrier(barrier_id=barrier_id + wg_idx, number_of_threads=128)
        # SMEM -> GMEM
        if leader_warp:
            if const_expr(not self.dKV_postprocess):
                cute.copy(tma_atom_dKV, tdKVsdKV, tdKVgdKV[None, epi_stage])
            else:
                with cute.arch.elect_one():
                    copy_utils.cpasync_reduce_bulk_add_f32(
                        sdKV.iterator,
                        gdKV_epi[None, epi_stage].iterator,
                        self.tma_copy_bytes["dKacc"],
                    )
            if const_expr(epi_stage < num_epi_stages - 1):
                # Throttle: drain this stage's bulk copy before reusing the buffer.
                cute.arch.cp_async_bulk_commit_group()
                cute.arch.cp_async_bulk_wait_group(0, read=read_flag)
            cute.arch.barrier_arrive(
                barrier_id=barrier_id + wg_idx, number_of_threads=128 + cute.arch.WARP_SIZE
            )
        # Barrier since all warps need to wait for SMEM to be freed
        cute.arch.fence_view_async_shared()
        cute.arch.barrier(
            barrier_id=barrier_id + wg_idx, number_of_threads=128 + cute.arch.WARP_SIZE
        )
    # semaphore release
    # NOTE: arrive_inc calls red_release which issues membar
    if const_expr(deterministic_KV):
        if leader_warp:
            cute.arch.cp_async_bulk_commit_group()
            cute.arch.cp_async_bulk_wait_group(0, read=read_flag)
        cute.arch.barrier(barrier_id=barrier_id + wg_idx, number_of_threads=128)
        barrier.arrive_inc(mdKV_semaphore_cur.iterator, tidx, wg_idx, 1)
    cute.arch.sync_warp()
    with cute.arch.elect_one():
        pipeline_dKV.consumer_release(consumer_state_dKV)
    consumer_state_dKV.advance()
    return consumer_state_dKV
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_bwd_sm100.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 3690,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:tests/cute/test_flash_attn_varlen.py | import itertools
from typing import Optional
from einops import rearrange
import pytest
import torch
import torch.nn.functional as F
from flash_attn.cute import flash_attn_varlen_func
IS_SM90 = torch.cuda.get_device_capability()[0] == 9
@pytest.mark.parametrize("B", [1, 7, 20])
@pytest.mark.parametrize("H", [1, 4, 6])
@pytest.mark.parametrize("D", [64, 128])
@pytest.mark.parametrize("min_seq_len", [1, 32, 128])
@pytest.mark.parametrize("max_seq_len", [8, 64, 2048])
@pytest.mark.parametrize("causal", [True, False])
@pytest.mark.parametrize("softmax_scale", [None, 0.1])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
def test_varlen(
    B,
    H,
    D,
    min_seq_len,
    max_seq_len,
    causal,
    softmax_scale,
    dtype,
    mha_type,
):
    """End-to-end varlen attention check: cute kernel vs. PyTorch reference."""
    if min_seq_len > max_seq_len:
        pytest.skip("Skipping min_seq_len > max_seq_len")
    q, k, v, cu_seqlens_q, cu_seqlens_k, total_q, total_k = generate_varlen_args(
        batch_size=B,
        n_heads=H,
        d_head=D,
        min_len=min_seq_len,
        max_len=max_seq_len,
        mha_type=mha_type,
        dtype=dtype,
    )
    # SM90 backward pass doesn't support varlen yet
    assert check_varlen_vs_torch_flash(
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        total_q=total_q,
        total_k=total_k,
        softmax_scale=softmax_scale,
        causal=causal,
        mha_type=mha_type,
        skip_backward=IS_SM90,
    )
def check_varlen_vs_torch_flash(
    q, k, v,
    cu_seqlens_q=None,
    cu_seqlens_k=None,
    seqused_q=None,
    seqused_k=None,
    total_q=None,
    total_k=None,
    softmax_scale=None,
    causal=True,
    mha_type='mha',
    softcap=0.0,
    atol=3e-2,
    rtol=3e-2,
    skip_backward=False,
):
    """Run the cute varlen kernel and the PyTorch reference on cloned inputs.

    Returns True when the forward outputs — and, unless ``skip_backward`` is
    set, the dQ/dK/dV gradients — agree within (atol, rtol).
    """
    assert q.requires_grad and k.requires_grad and v.requires_grad, "Set requires_grad=True on inputs"

    def fresh_leaf(t):
        # Independent leaf tensor so each implementation accumulates its own grads.
        return t.clone().detach().requires_grad_(True)

    def dup_seqlens(cu):
        # One private copy of the cumulative-length tensor per implementation.
        return (None, None) if cu is None else (cu.clone(), cu.clone())

    q_fa, k_fa, v_fa = (fresh_leaf(t) for t in (q, k, v))
    q_t, k_t, v_t = (fresh_leaf(t) for t in (q, k, v))
    cu_seqlens_q_fa, cu_seqlens_q_t = dup_seqlens(cu_seqlens_q)
    cu_seqlens_k_fa, cu_seqlens_k_t = dup_seqlens(cu_seqlens_k)

    # The kernel wants an explicit scale; default to 1/sqrt(d_head).
    scale_fa = (1.0 / q.shape[-1] ** 0.5) if softmax_scale is None else softmax_scale
    out_fa, lse_fa = flash_attn_varlen_func(
        q_fa, k_fa, v_fa,
        cu_seqlens_q=cu_seqlens_q_fa,
        cu_seqlens_k=cu_seqlens_k_fa,
        seqused_q=seqused_q,
        seqused_k=seqused_k,
        softmax_scale=scale_fa,
        causal=causal,
        window_size=(None, None),
        learnable_sink=None,
        softcap=softcap,
        pack_gqa=None,
    )
    out_t = torch_flash_ref(
        q_t, k_t, v_t,
        cu_seqlens_q=cu_seqlens_q_t,
        cu_seqlens_k=cu_seqlens_k_t,
        seqused_q=seqused_q,
        seqused_k=seqused_k,
        total_q=total_q,
        total_k=total_k,
        softmax_scale=softmax_scale,
        causal=causal,
        mha_type=mha_type,
    )
    if not torch.allclose(out_fa.float(), out_t.float(), atol=atol, rtol=rtol):
        return False
    if skip_backward:
        # Backward unsupported for this configuration (e.g. SM90 varlen).
        return True

    # Push the same upstream gradient through both backward paths.
    grad_out = torch.randn_like(out_fa)
    out_fa.backward(fresh_leaf(grad_out), retain_graph=False)
    out_t.backward(fresh_leaf(grad_out), retain_graph=False)
    grad_pairs = (
        (q_fa.grad, q_t.grad),
        (k_fa.grad, k_t.grad),
        (v_fa.grad, v_t.grad),
    )
    return all(
        torch.allclose(g_fa.float(), g_ref.float(), atol=atol, rtol=rtol)
        for g_fa, g_ref in grad_pairs
    )
def generate_varlen_args(
    batch_size=8,
    n_heads=16,
    d_head=128,
    min_len=32,
    max_len=64,
    mha_type="mha",
    dtype=torch.bfloat16,
    device="cuda",
):
    """Generate random packed (varlen) Q/K/V tensors plus cumulative seqlens.

    Sequence lengths per batch element are drawn uniformly from
    [min_len, max_len]; K/V lengths mirror the Q lengths. The head counts
    follow ``mha_type``: "mha" (H_kv == H), "gqa" (H == 3 * n_heads,
    H_kv == n_heads), "mqa" (H_kv == 1).

    The ``device`` parameter (default "cuda", matching the previous
    hard-coded behavior) allows CPU generation for shape-level testing.

    Returns:
        (q, k, v, cu_seqlens_q, cu_seqlens_k, total_q, total_k) where q/k/v
        are packed along the token dimension with requires_grad=True, the
        cu_seqlens tensors are int32 prefix sums of length batch_size + 1,
        and total_q/total_k are 0-dim tensors holding the packed lengths.
    """
    torch.manual_seed(0)
    assert mha_type in ["mha", "mqa", "gqa"]
    # Per-sequence lengths in [min_len, max_len]; K mirrors Q.
    lens_q = torch.randint(low=min_len, high=max_len + 1, size=(batch_size,))
    lens_k = lens_q.clone()
    # Prefix sums with a leading 0: tokens of batch b live in
    # [cu_seqlens[b], cu_seqlens[b + 1]).
    cu_seqlens_q = torch.cat([torch.zeros(1, dtype=torch.int32), lens_q.cumsum(0)])
    cu_seqlens_k = torch.cat([torch.zeros(1, dtype=torch.int32), lens_k.cumsum(0)])
    total_q = cu_seqlens_q[-1]
    total_k = cu_seqlens_k[-1]
    cu_seqlens_q = cu_seqlens_q.contiguous().to(dtype=torch.int32, device=device)
    cu_seqlens_k = cu_seqlens_k.contiguous().to(dtype=torch.int32, device=device)
    # Query/key-value head counts per attention flavor.
    if mha_type == "gqa":
        H = 3 * n_heads
        H_kv = n_heads
    elif mha_type == "mha":
        H = H_kv = n_heads
    else:  # MQA
        H = n_heads
        H_kv = 1
    d_head_v = d_head
    q = torch.randn(total_q, H, d_head, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(total_k, H_kv, d_head, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(total_k, H_kv, d_head_v, device=device, dtype=dtype, requires_grad=True)
    return q, k, v, cu_seqlens_q, cu_seqlens_k, total_q, total_k
# Simple for loop over batch dim implementation
def torch_flash_ref(
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
cu_seqlens_q: torch.Tensor = None,
cu_seqlens_k: torch.Tensor = None,
total_q: int = 0,
total_k: int = 0,
softmax_scale: Optional[float] = None,
causal: bool = False,
**kwargs
):
"""
q: (total_q, H, d) if cu_seqlens_q is not None, otherwise (B, L, H, d)
k: (total_k, H_kv, d) if cu_seqlens_k is not None, otherwise (B, L, H_kv, d)
v: (total_k, H_kv, d_v) if cu_seqlens_k is not None, otherwise (B, L, H_kv, d_v)
cu_seqlens_q: (B+1,) int32, cumulative
cu_seqlens_k: (B+1,) int32, cumulative
seqused_q: (B+1,) int32
seqused_k: (B+1,) int32
Returns:
out packed like q: (total_q, H, d_v)
"""
if cu_seqlens_q is not None:
assert cu_seqlens_q.dim() == 1
assert total_q == q.shape[0]
assert q.dim() == 3
H = q.shape[1]
B = cu_seqlens_q.shape[0] - 1
else:
assert q.dim() == 4
H = q.shape[2]
B = q.shape[0]
if cu_seqlens_k is not None:
assert cu_seqlens_k.dim() == 1
assert total_k == k.shape[0] == v.shape[0]
assert k.dim() == v.dim() == 3
H_kv = k.shape[1]
B_kv = cu_seqlens_k.shape[0] - 1
else:
assert k.dim() == v.dim() == 4
assert k.shape[0] == v.shape[0]
H_kv = k.shape[2]
B_kv = k.shape[0]
d = q.shape[-1]
d_v = v.shape[-1]
assert H_kv == v.shape[-2]
assert d == k.shape[-1]
assert B == B_kv
assert q.device == k.device == v.device
assert q.is_floating_point() and k.is_floating_point() and v.is_floating_point()
device = q.device
dtype = q.dtype
hcseq_q = cu_seqlens_q.to(device='cpu')
hcseq_k = cu_seqlens_k.to(device='cpu')
outs = []
for b in range(B):
if hcseq_q is not None:
q_start, q_end = int(hcseq_q[b]), int(hcseq_q[b+1])
qb = q[q_start:q_end]
else:
qb = q[b]
if hcseq_k is not None:
k_start, k_end = int(hcseq_k[b]), int(hcseq_k[b+1])
kb = k[k_start:k_end]
vb = v[k_start:k_end]
else:
kb = k[b]
vb = v[b]
qb = qb.permute(1, 0, 2).unsqueeze(0)
kb = kb.permute(1, 0, 2).unsqueeze(0)
vb = vb.permute(1, 0, 2).unsqueeze(0)
ob = F.scaled_dot_product_attention(
qb, kb, vb,
attn_mask=None,
dropout_p=0.0,
is_causal=causal,
scale=softmax_scale,
enable_gqa=H_kv!=H
)
ob = ob.squeeze(0).permute(1, 0, 2).contiguous()
outs.append(ob)
if cu_seqlens_q is not None:
out = torch.cat(outs, dim=0).to(device=device, dtype=dtype)
else:
out = torch.stack(outs, dim=0).to(device=device, dtype=dtype)
return out
@torch.no_grad()
def _stats(name, a, b, atol, rtol):
    """Print mean absolute/relative error between `a` and `b`; return pass/fail.

    Fix: `mean_rel` is now a plain float — previously only the denominator
    called .item(), leaving a 0-dim tensor in the comparison and format string.
    """
    diff = (a - b).float()
    mean_abs = diff.abs().mean().item()
    mean_rel = mean_abs / b.abs().clamp_min(1e-6).mean().item()
    print(f"{name}: mean_abs={mean_abs:.4e}, mean_rel={mean_rel:.4e}, sum_fa={a.sum()}, sum_ref={b.sum()}")
    return mean_abs < atol and mean_rel < rtol
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_flash_attn_varlen.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:flash_attn/cute/copy_utils.py | # Copyright (c) 2025, Wentao Guo, Ted Zadouri, Tri Dao.
import math
from typing import Optional, Type, Callable
import cutlass
import cutlass.cute as cute
from cutlass import Float32, Int32, const_expr
from cutlass.cute.nvgpu import cpasync
import cutlass.utils.blackwell_helpers as sm100_utils
from cutlass.cutlass_dsl import T, dsl_user_op
from cutlass._mlir.dialects import llvm
import cutlass.pipeline
@dsl_user_op
def cvt_copy(
    atom: cute.CopyAtom,
    src: cute.Tensor,
    dst: cute.Tensor,
    *,
    pred: Optional[cute.Tensor] = None,
    loc=None,
    ip=None,
    **kwargs,
) -> None:
    """Copy ``src`` to ``dst``, first converting to ``dst``'s element type if they differ.

    ``src`` must be a register-memory (rmem) fragment (asserted below); the
    dtype conversion happens through a temporary rmem fragment.
    """
    assert isinstance(src.iterator, cute.Pointer) and src.memspace == cute.AddressSpace.rmem
    if const_expr(src.element_type != dst.element_type):
        # Materialize a converted copy of src in registers with dst's dtype.
        src_cvt = cute.make_fragment_like(src, dst.element_type, loc=loc, ip=ip)
        src_cvt.store(src.load().to(dst.element_type))
        src = src_cvt
    cute.copy(atom, src, dst, pred=pred, loc=loc, ip=ip, **kwargs)
@dsl_user_op
def load_s2r(src: cute.Tensor, *, loc=None, ip=None) -> cute.Tensor:
    """Copy ``src`` into a fresh register fragment (via autovec copy) and return it.

    NOTE(review): the name suggests smem -> rmem, but the source memspace is
    not asserted here.
    """
    dst = cute.make_fragment_like(src, src.element_type, loc=loc, ip=ip)
    cute.autovec_copy(src, dst, loc=loc, ip=ip)
    return dst
@dsl_user_op
def get_copy_atom(
    dtype: Type[cutlass.Numeric], num_copy_elems: int, is_async: bool = False, *, loc=None, ip=None
) -> cute.CopyAtom:
    """Build a copy atom for ``dtype``, capping the per-copy width at 128 bits.

    ``is_async`` selects the cp.async G2S op instead of the universal copy op.
    """
    num_copy_bits = const_expr(min(128, num_copy_elems * dtype.width))
    copy_op = cpasync.CopyG2SOp() if is_async else cute.nvgpu.CopyUniversalOp()
    return cute.make_copy_atom(copy_op, dtype, num_bits_per_copy=num_copy_bits)
@dsl_user_op
def make_tmem_copy(
    tmem_copy_atom: cute.CopyAtom, num_wg: int = 1, *, loc=None, ip=None
) -> cute.CopyAtom:
    """Tile a tensor-memory copy atom across ``num_wg`` warpgroups.

    Only atoms with 32 datapaths and 32 bits per access are supported
    (asserted below). The TV layout spreads ``num_rep`` repetitions over the
    128 threads of each warpgroup — exact mode meanings follow CuTe layout
    conventions.
    """
    num_dp, num_bits, num_rep, _ = sm100_utils.get_tmem_copy_properties(tmem_copy_atom)
    assert num_dp == 32
    assert num_bits == 32
    tiler_mn = (cute.make_layout((128 * num_rep * num_wg // 32, 32), stride=(32, 1)),)
    layout_tv = cute.make_layout(
        ((32, 4, num_wg), (num_rep, 32)), stride=((0, 1, 4 * num_rep), (4, 4 * num_rep * num_wg))
    )
    return cute.make_tiled_copy(tmem_copy_atom, layout_tv, tiler_mn)
@dsl_user_op
def copy(
    src: cute.Tensor,
    dst: cute.Tensor,
    *,
    pred: Optional[cute.Tensor] = None,
    num_copy_elems: int = 1,
    is_async: bool = False,
    loc=None,
    ip=None,
    **kwargs,
) -> None:
    """Copy ``src`` to ``dst`` with an atom derived from ``src``'s element type.

    Convenience wrapper over :func:`get_copy_atom` + ``cute.copy``.
    """
    copy_atom = get_copy_atom(src.element_type, num_copy_elems, is_async)
    cute.copy(copy_atom, src, dst, pred=pred, loc=loc, ip=ip, **kwargs)
def tiled_copy_1d(
    dtype: Type[cutlass.Numeric], num_threads: int, num_copy_elems: int = 1, is_async: bool = False
) -> cute.TiledCopy:
    """Build a 1-D tiled copy: ``num_threads`` threads x ``num_copy_elems`` values each."""
    num_copy_bits = num_copy_elems * dtype.width
    copy_op = cpasync.CopyG2SOp() if is_async else cute.nvgpu.CopyUniversalOp()
    copy_atom = cute.make_copy_atom(copy_op, dtype, num_bits_per_copy=num_copy_bits)
    thr_layout = cute.make_layout(num_threads)
    val_layout = cute.make_layout(num_copy_elems)
    return cute.make_tiled_copy_tv(copy_atom, thr_layout, val_layout)
def tiled_copy_2d(
    dtype: Type[cutlass.Numeric], major_mode_size: int, num_threads: int, is_async: bool = False
) -> cute.TiledCopy:
    """Build a 2-D tiled copy over a (rows, major_mode_size) tile.

    Per-copy width is the widest dtype-multiple vector (<= 128 bits) that
    evenly divides ``major_mode_size``; threads are laid out so consecutive
    threads cover the contiguous (major) mode first.
    """
    num_copy_bits = math.gcd(major_mode_size, 128 // dtype.width) * dtype.width
    copy_elems = num_copy_bits // dtype.width
    copy_op = cpasync.CopyG2SOp() if is_async else cute.nvgpu.CopyUniversalOp()
    copy_atom = cute.make_copy_atom(copy_op, dtype, num_bits_per_copy=num_copy_bits)
    gmem_threads_per_row = major_mode_size // copy_elems
    # The thread grid must tile the row width exactly.
    assert num_threads % gmem_threads_per_row == 0
    thr_layout = cute.make_ordered_layout(
        (num_threads // gmem_threads_per_row, gmem_threads_per_row),
        order=(1, 0),
    )
    val_layout = cute.make_layout((1, copy_elems))
    return cute.make_tiled_copy_tv(copy_atom, thr_layout, val_layout)
@dsl_user_op
def atomic_add_fp32x4(
    a: Float32, b: Float32, c: Float32, d: Float32, gmem_ptr: cute.Pointer, *, loc=None, ip=None
) -> None:
    """Atomically add four fp32 values to four consecutive floats at ``gmem_ptr``.

    Emits a single vectorized ``red.global.add.v4.f32`` PTX instruction; the
    commented-out variants experiment with L2 cache hints.
    """
    gmem_ptr_i64 = gmem_ptr.toint(loc=loc, ip=ip).ir_value()
    # cache_hint = cutlass.Int64(0x12F0000000000000)
    llvm.inline_asm(
        None,
        [
            gmem_ptr_i64,
            Float32(a).ir_value(loc=loc, ip=ip),
            Float32(b).ir_value(loc=loc, ip=ip),
            Float32(c).ir_value(loc=loc, ip=ip),
            Float32(d).ir_value(loc=loc, ip=ip),
        ],
        # [gmem_ptr_i64, Float32(a).ir_value(loc=loc, ip=ip), cache_hint.ir_value()],
        "{\n\t"
        # ".reg .b128 abcd;\n\t"
        # "mov.b128 abcd, {$1, $2, $3, $4};\n\t"
        ".reg .v4 .f32 abcd;\n\t"
        # "mov.b128 abcd, {$1, $2, $3, $4};\n\t"
        "mov.f32 abcd.x, $1;\n\t"
        "mov.f32 abcd.y, $2;\n\t"
        "mov.f32 abcd.z, $3;\n\t"
        "mov.f32 abcd.w, $4;\n\t"
        "red.global.add.v4.f32 [$0], abcd;\n\t"
        # "red.global.add.L2::cache_hint.v4.f32 [$0], abcd, 0x14F0000000000000;\n\t"
        "}\n",
        # "red.global.add.L2::cache_hint.f32 [$0], $1, 0x12F0000000000000;",
        # "red.global.add.L2::cache_hint.f32 [$0], $1, $2;",
        "l,f,f,f,f",
        # "l,f,l",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@dsl_user_op
def set_block_rank(
    smem_ptr: cute.Pointer, peer_cta_rank_in_cluster: Int32, *, loc=None, ip=None
) -> Int32:
    """Map the given smem pointer to the address at another CTA rank in the cluster.

    Uses the PTX ``mapa.shared::cluster`` instruction; returns the remote
    shared-memory address as a 32-bit integer.
    """
    smem_ptr_i32 = smem_ptr.toint(loc=loc, ip=ip).ir_value()
    return Int32(
        llvm.inline_asm(
            T.i32(),
            [smem_ptr_i32, peer_cta_rank_in_cluster.ir_value()],
            "mapa.shared::cluster.u32 $0, $1, $2;",
            "=r,r,r",
            has_side_effects=False,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    )
@dsl_user_op
def store_shared_remote_fp32x4(
    a: Float32,
    b: Float32,
    c: Float32,
    d: Float32,
    smem_ptr: cute.Pointer,
    mbar_ptr: cute.Pointer,
    peer_cta_rank_in_cluster: Int32,
    *,
    loc=None,
    ip=None,
) -> None:
    """Store four fp32 values into a peer CTA's shared memory.

    Both ``smem_ptr`` and ``mbar_ptr`` are remapped to the peer rank's address
    space, then a single ``st.async.shared::cluster`` stores the vector and
    signals the mbarrier (complete_tx::bytes) on arrival.
    """
    remote_smem_ptr_i32 = set_block_rank(
        smem_ptr, peer_cta_rank_in_cluster, loc=loc, ip=ip
    ).ir_value()
    remote_mbar_ptr_i32 = set_block_rank(
        mbar_ptr, peer_cta_rank_in_cluster, loc=loc, ip=ip
    ).ir_value()
    llvm.inline_asm(
        None,
        [
            remote_smem_ptr_i32,
            remote_mbar_ptr_i32,
            Float32(a).ir_value(loc=loc, ip=ip),
            Float32(b).ir_value(loc=loc, ip=ip),
            Float32(c).ir_value(loc=loc, ip=ip),
            Float32(d).ir_value(loc=loc, ip=ip),
        ],
        "{\n\t"
        ".reg .v4 .f32 abcd;\n\t"
        "mov.f32 abcd.x, $2;\n\t"
        "mov.f32 abcd.y, $3;\n\t"
        "mov.f32 abcd.z, $4;\n\t"
        "mov.f32 abcd.w, $5;\n\t"
        "st.async.shared::cluster.mbarrier::complete_tx::bytes.v4.f32 [$0], abcd, [$1];\n\t"
        "}\n",
        "r,r,f,f,f,f",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@dsl_user_op
def cpasync_bulk_s2cluster(
    smem_src_ptr: cute.Pointer,
    smem_dst_ptr: cute.Pointer,
    mbar_ptr: cute.Pointer,
    size: int | Int32,
    peer_cta_rank_in_cluster: Int32,
    *,
    loc=None,
    ip=None,
):
    """Bulk-copy ``size`` bytes of local smem into a peer CTA's smem.

    Destination and mbarrier pointers are remapped to the peer rank; the
    mbarrier tracks transfer completion (complete_tx::bytes).
    """
    smem_src_ptr_i32 = smem_src_ptr.toint(loc=loc, ip=ip).ir_value()
    smem_dst_ptr_i32 = set_block_rank(
        smem_dst_ptr, peer_cta_rank_in_cluster, loc=loc, ip=ip
    ).ir_value()
    mbar_ptr_i32 = set_block_rank(mbar_ptr, peer_cta_rank_in_cluster, loc=loc, ip=ip).ir_value()
    llvm.inline_asm(
        None,
        [
            smem_dst_ptr_i32,
            smem_src_ptr_i32,
            mbar_ptr_i32,
            Int32(size).ir_value(loc=loc, ip=ip),
        ],
        "cp.async.bulk.shared::cluster.shared::cta.mbarrier::complete_tx::bytes [$0], [$1], $3, [$2];",
        "r,r,r,r",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@dsl_user_op
def cpasync_bulk_g2s(
    gmem_ptr: cute.Pointer,
    smem_ptr: cute.Pointer,
    tma_bar_ptr: cute.Pointer,
    size: int | Int32,
    *,
    loc=None,
    ip=None,
):
    """Bulk-copy ``size`` bytes from global memory into local shared memory.

    Completion is signaled through the TMA mbarrier at ``tma_bar_ptr``
    (complete_tx::bytes).
    """
    gmem_ptr_i64 = gmem_ptr.toint(loc=loc, ip=ip).ir_value()
    smem_ptr_i32 = smem_ptr.toint(loc=loc, ip=ip).ir_value()
    mbar_ptr_i32 = tma_bar_ptr.toint(loc=loc, ip=ip).ir_value()
    llvm.inline_asm(
        None,
        [gmem_ptr_i64, smem_ptr_i32, mbar_ptr_i32, Int32(size).ir_value()],
        "cp.async.bulk.shared::cta.global.mbarrier::complete_tx::bytes [$1], [$0], $3, [$2];",
        "l,r,r,r",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@dsl_user_op
def cpasync_reduce_bulk_add_f32(
    smem_ptr: cute.Pointer,
    gmem_ptr: cute.Pointer,
    store_bytes: int | Int32,
    *,
    loc=None,
    ip=None,
):
    """Bulk fp32 add-reduction from shared memory into global memory.

    Emits ``cp.reduce.async.bulk...add.f32`` over ``store_bytes`` bytes as part
    of the surrounding bulk group; the commented variant adds an L2 cache hint.
    """
    smem_ptr_i32 = smem_ptr.toint(loc=loc, ip=ip).ir_value()
    # cache_hint = cutlass.Int64(0x14F0000000000000)  # EVICT_LAST
    llvm.inline_asm(
        None,
        [gmem_ptr.llvm_ptr, smem_ptr_i32, Int32(store_bytes).ir_value()],
        "cp.reduce.async.bulk.global.shared::cta.bulk_group.add.f32 [$0], [$1], $2;",
        "l,r,r",
        # [gmem_ptr.llvm_ptr, smem_ptr_i32, Int32(store_bytes).ir_value(), cache_hint.ir_value()],
        # "cp.reduce.async.bulk.global.shared::cta.bulk_group.L2::cache_hint.add.f32 [$0], [$1], $2, $3;",
        # "l,r,r,l",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
def cpasync_bulk_get_copy_fn(
    src_tensor: cute.Tensor,
    dst_tensor: cute.Tensor,
    single_stage: bool = False,
    **kwargs,
) -> Callable:
    """Return a closure issuing bulk G2S copies from ``src_tensor`` to ``dst_tensor``.

    With ``single_stage=False`` the last mode is treated as a stage index and
    the closure signature is ``(src_idx, dst_idx, **kw)``; with
    ``single_stage=True`` the whole tensor is copied and the closure takes no
    indices. Extra ``kwargs`` are forwarded to :func:`cpasync_bulk_g2s`.
    """
    # src_is_smem = const_expr(
    #     isinstance(src_tensor.iterator, cute.Pointer)
    #     and src_tensor.memspace == cute.AddressSpace.smem
    # )
    group_rank_src = const_expr(cute.rank(src_tensor) - (1 if not single_stage else 0))
    group_rank_dst = const_expr(cute.rank(dst_tensor) - (1 if not single_stage else 0))
    # ((atom_v, rest_v), STAGE), ((atom_v, rest_v), RestK)
    src = cute.group_modes(src_tensor, 0, group_rank_src)
    dst = cute.group_modes(dst_tensor, 0, group_rank_dst)

    def copy_bulk(src_idx, dst_idx, **new_kwargs):
        # Bytes per stage: every mode except the trailing stage mode.
        size = const_expr(cute.size(src.shape[:-1]) * src.element_type.width // 8)
        cpasync_bulk_g2s(
            src[None, src_idx].iterator,
            dst[None, dst_idx].iterator,
            size=size,
            **new_kwargs,
            **kwargs,
        )

    def copy_bulk_single_stage(**new_kwargs):
        size = const_expr(cute.size(src.shape) * src.element_type.width // 8)
        cpasync_bulk_g2s(src.iterator, dst.iterator, size=size, **new_kwargs, **kwargs)

    return copy_bulk if const_expr(not single_stage) else copy_bulk_single_stage
def tma_get_copy_fn(
    atom: cute.CopyAtom,
    cta_coord: cute.Coord,
    cta_layout: cute.Layout,
    src_tensor: cute.Tensor,
    dst_tensor: cute.Tensor,
    filter_zeros: bool = False,
    single_stage: bool = False,
    **kwargs,
) -> Callable:
    """Partition src/dst for TMA and return ``(copy_fn, smem_view, gmem_view)``.

    Direction-agnostic: whichever tensor lives in smem is paired with the gmem
    tensor for ``cpasync.tma_partition``. With ``single_stage=False`` the last
    mode indexes stages and ``copy_fn(src_idx, dst_idx)`` copies one stage;
    with ``single_stage=True`` ``copy_fn()`` copies everything at once.
    """
    src_is_smem = const_expr(
        isinstance(src_tensor.iterator, cute.Pointer)
        and src_tensor.memspace == cute.AddressSpace.smem
    )
    smem_tensor, gmem_tensor = (src_tensor, dst_tensor) if src_is_smem else (dst_tensor, src_tensor)
    group_rank_smem = const_expr(cute.rank(smem_tensor) - (1 if not single_stage else 0))
    group_rank_gmem = const_expr(cute.rank(gmem_tensor) - (1 if not single_stage else 0))
    # ((atom_v, rest_v), STAGE), ((atom_v, rest_v), RestK)
    s, g = cpasync.tma_partition(
        atom,
        cta_coord,
        cta_layout,
        cute.group_modes(smem_tensor, 0, group_rank_smem),
        cute.group_modes(gmem_tensor, 0, group_rank_gmem),
    )
    if const_expr(filter_zeros):
        s = cute.filter_zeros(s)
        g = cute.filter_zeros(g)
    # Restore the caller's src/dst orientation.
    src, dst = (s, g) if src_is_smem else (g, s)

    def copy_tma(src_idx, dst_idx, **new_kwargs):
        cute.copy(atom, src[None, src_idx], dst[None, dst_idx], **new_kwargs, **kwargs)

    def copy_tma_single_stage(**new_kwargs):
        cute.copy(atom, src, dst, **new_kwargs, **kwargs)

    return (copy_tma if const_expr(not single_stage) else copy_tma_single_stage), s, g
def tma_producer_copy_fn(copy: Callable, pipeline: cutlass.pipeline.PipelineAsync):
    """Bind a TMA copy callable to a producer pipeline.

    The returned function resolves the destination stage index and the TMA
    barrier from the producer's pipeline state before delegating to ``copy``.
    """

    def bound_copy(src_idx, producer_state: cutlass.pipeline.PipelineState, **extra):
        stage = producer_state.index
        barrier = pipeline.producer_get_barrier(producer_state)
        copy(src_idx=src_idx, dst_idx=stage, tma_bar_ptr=barrier, **extra)

    return bound_copy
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/copy_utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/cute_dsl_utils.py | # Copyright (c) 2025, Tri Dao.
import os
import pathlib
from typing import Tuple
from functools import partial, lru_cache
from dataclasses import dataclass, fields
import torch
try:
from triton.tools.disasm import extract
except ImportError:
extract = None
import cutlass
import cutlass.cute as cute
from cutlass.base_dsl.typing import JitArgument
from cutlass.cutlass_dsl import NumericMeta
from cutlass.cute.runtime import from_dlpack
# Types treated as compile-time constants when flattening JIT arguments.
StaticTypes = (cutlass.Constexpr, NumericMeta, int, bool, str, float, type(None))
# Keep the originals so the patched versions below can delegate / be restored.
load_cubin_module_data_og = cutlass.base_dsl.runtime.cuda.load_cubin_module_data
cute_compile_og = cute.compile
# Mapping from torch dtypes to their cutlass numeric equivalents.
torch2cute_dtype_map = {
    torch.float16: cutlass.Float16,
    torch.bfloat16: cutlass.BFloat16,
    torch.float32: cutlass.Float32,
}
@lru_cache
def get_max_active_clusters(cluster_size):
    """Cached hardware query: max clusters of ``cluster_size`` resident at once."""
    return cutlass.utils.HardwareInfo().get_max_active_clusters(cluster_size=cluster_size)
@lru_cache
def get_device_capacity(device: torch.device = None) -> Tuple[int, int]:
    """Cached (major, minor) CUDA compute capability of ``device``."""
    return torch.cuda.get_device_capability(device)
@dataclass
class ArgumentsBase(JitArgument):
    """Base dataclass for JIT kernel arguments.

    Fields whose values are ``StaticTypes`` are treated as compile-time
    constants and skipped when marshalling C pointers / MLIR values; all other
    fields are delegated to in declaration order.
    """

    def __c_pointers__(self):
        # Gather C pointers from every runtime (non-static) field, in field order.
        all_fields = [getattr(self, field.name) for field in fields(self)]
        non_constexpr_fields = [f for f in all_fields if not isinstance(f, StaticTypes)]
        c_ptrs = []
        for obj in non_constexpr_fields:
            if hasattr(obj, "__c_pointers__"):
                c_ptrs.extend(obj.__c_pointers__())
        return c_ptrs

    def __get_mlir_types__(self):
        # Side effect: records in self._values_pos how many MLIR values each
        # runtime field consumes — __new_from_mlir_values__ depends on this
        # having run first.
        all_fields = [getattr(self, field.name) for field in fields(self)]
        non_constexpr_fields = [f for f in all_fields if not isinstance(f, StaticTypes)]
        types, self._values_pos = [], []
        for obj in non_constexpr_fields:
            if hasattr(obj, "__get_mlir_types__"):
                obj_types = obj.__get_mlir_types__()
                types.extend(obj_types)
                self._values_pos.append(len(obj_types))
            else:
                self._values_pos.append(0)
        return types

    def __new_from_mlir_values__(self, values):
        # Rebuild an instance, consuming `values` according to the per-field
        # counts recorded by __get_mlir_types__.
        all_fields = {field.name: getattr(self, field.name) for field in fields(self)}
        constexpr_fields = {n: f for n, f in all_fields.items() if isinstance(f, StaticTypes)}
        non_constexpr_fields = {
            n: f for n, f in all_fields.items() if not isinstance(f, StaticTypes)
        }
        for (name, field), n_items in zip(non_constexpr_fields.items(), self._values_pos):
            non_constexpr_fields[name] = cutlass.new_from_mlir_values(field, values[:n_items])
            values = values[n_items:]
        return self.__class__(**non_constexpr_fields, **constexpr_fields)
def load_cubin_module_data_patched(cubin_data, filepath):
    """Dump the cubin bytes to ``filepath``, then load the module as usual."""
    dump_path = pathlib.Path(filepath)
    dump_path.write_bytes(cubin_data)
    return load_cubin_module_data_og(cubin_data)
def cute_compile_patched(*args, **kwargs):
    """A patched version of cute.compile that dump the SASS to a file if CUTE_CUBIN_PATH is set."""
    cubin_path = os.getenv("CUTE_CUBIN_PATH", None)
    if cubin_path is not None:
        # Temporarily swap in the dumping loader so compile writes the cubin out.
        cutlass.base_dsl.runtime.cuda.load_cubin_module_data = partial(
            load_cubin_module_data_patched, filepath=cubin_path
        )
    output = cute_compile_og(*args, **kwargs)
    if cubin_path is not None:
        # Restore the original loader before post-processing.
        cutlass.base_dsl.runtime.cuda.load_cubin_module_data = load_cubin_module_data_og
        if extract is not None:
            # extract comes from triton (optional import); disassemble the
            # dumped cubin to annotated SASS next to it.
            sass = extract(cubin_path, None)
            pathlib.Path(cubin_path).with_suffix(".annotated.sass").write_text(sass)
    return output
def assume_strides_aligned(t):
    """Assume all strides except the last are divisible by 128 bits.
    Python int strides (e.g., stride=0 from GQA expand) are kept as-is
    since they're static and don't need alignment assumptions.
    """
    # Elements per 128 bits for this dtype.
    divby = 128 // t.element_type.width
    strides = tuple(s if isinstance(s, int) else cute.assume(s, divby=divby) for s in t.stride[:-1])
    # The last (contiguous) stride is passed through untouched.
    return (*strides, t.stride[-1])
def assume_tensor_aligned(t):
    """Rebuild a tensor with 128-bit aligned stride assumptions. Passes through None."""
    if t is None:
        return None
    aligned_layout = cute.make_layout(t.shape, stride=assume_strides_aligned(t))
    return cute.make_tensor(t.iterator, aligned_layout)
def to_cute_tensor(t, assumed_align=16, leading_dim=-1, fully_dynamic=False, enable_tvm_ffi=True):
    """Convert torch tensor to cute tensor for TVM FFI. leading_dim=-1 defaults to t.ndim-1."""
    # detach(): the FFI view must not capture autograd history.
    tensor = from_dlpack(t.detach(), assumed_align=assumed_align, enable_tvm_ffi=enable_tvm_ffi)
    if fully_dynamic:
        # No leading-dim constraint: mark the whole layout dynamic.
        return tensor.mark_layout_dynamic()
    if leading_dim == -1:
        leading_dim = t.ndim - 1
    return tensor.mark_layout_dynamic(leading_dim=leading_dim)
def to_cute_aux_tensor(t, enable_tvm_ffi=True):
    """Convert torch tensor to cute tensor for TVM FFI, tailored to FlexAttention aux tensors.
    This allows the user to specify alignment and leading dimension for aux tensors used in
    custom score_mod callables.
    """
    # NOTE(review): when __assumed_align__ is absent, assumed_align=None is
    # forwarded (bypassing to_cute_tensor's default of 16) — confirm
    # from_dlpack accepts None here.
    assumed_align: int = getattr(t, "__assumed_align__", None)
    leading_dim: int = getattr(t, "__leading_dim__", None)
    # No explicit leading dim => treat the whole layout as dynamic.
    fully_dynamic: bool = leading_dim is None
    return to_cute_tensor(
        t,
        assumed_align=assumed_align,
        leading_dim=leading_dim,
        fully_dynamic=fully_dynamic,
        enable_tvm_ffi=enable_tvm_ffi,
    )
def get_aux_tensor_metadata(aux_tensors):
    """Collect (assumed_align, leading_dim, has_leading_dim) for each aux tensor.

    Missing attributes fall back to align=0 and leading_dim=-1; the third
    element records whether ``__leading_dim__`` was explicitly set.
    """
    metadata = []
    for tensor in aux_tensors:
        align = getattr(tensor, "__assumed_align__", 0)
        leading = getattr(tensor, "__leading_dim__", -1)
        metadata.append((align, leading, hasattr(tensor, "__leading_dim__")))
    return tuple(metadata)
def get_broadcast_dims(tensor: torch.Tensor) -> Tuple[bool, ...]:
    """Return tuple of bools indicating which dims have stride=0 (broadcast).

    CuTe's mark_layout_dynamic() keeps stride=0 static, so kernels compiled
    for different broadcast patterns are not interchangeable — this tuple is
    therefore useful as part of a compile key.
    """
    strides = tensor.stride()
    return tuple(stride == 0 for stride in strides)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/cute_dsl_utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:tests/cute/test_score_mod.py | import pytest
import torch
import cutlass
import cutlass.cute as cute
from cutlass._mlir.dialects import math as mlir_math
import operator
from torch.nn.attention.flex_attention import flex_attention
from flash_attn.cute.interface import _flash_attn_fwd, _flash_attn_bwd
COMPUTE_CAPABILITY = torch.cuda.get_device_capability()[0]
from score_mod_definitions import (
# TensorSSA-based score mods
score_mod_identity as score_mod_1,
score_mod_causal as score_mod_2,
score_mod_rel_bias as score_mod_3,
score_mod_rel_bias_x2 as score_mod_4,
score_mod_times_two as score_mod_5,
score_mod_alibi as score_mod_6,
score_mod_sliding_window as score_mod_7,
score_mod_block_diagonal as score_mod_8,
score_mod_causal_v2 as score_mod_9,
score_mod_batch_bias as score_mod_10,
score_mod_dual_buffer as score_mod_11,
) # isort: split
from score_mod_definitions import (
score_mod_identity_vectorized as score_mod_1_vectorized,
score_mod_causal_vectorized as score_mod_2_vectorized,
score_mod_rel_bias as score_mod_3_vectorized,
score_mod_rel_bias_x2_vectorized as score_mod_4_vectorized,
score_mod_times_two_vectorized as score_mod_5_vectorized,
score_mod_alibi_vectorized as score_mod_6_vectorized,
score_mod_batch_bias_vectorized as score_mod_10_vectorized,
score_mod_dual_buffer_vectorized as score_mod_11_vectorized,
) # isort: split
from score_mod_definitions import (
# Eager (torch) reference score mods
identity_eager,
causal_eager as causal_mask_eager,
rel_bias_eager as relative_bias_eager,
rel_bias_x2_eager as relative_bias_v2_eager,
times_two_eager,
alibi_eager as alibi_bias_eager,
sliding_window_eager,
block_diagonal_eager,
causal_v2_eager as causal_mask_v2_eager,
batch_bias_factory as batch_bias,
dual_buffer_factory as dual_buffer_bias,
)
# NOTE: a duplicate `COMPUTE_CAPABILITY = torch.cuda.get_device_capability()[0]`
# assignment used to live here; it is already defined right after the imports.
# Test pairs: (cute_jit_function, eager_reference_function)
TEST_PAIRS = [
    (score_mod_1, None),
    (score_mod_2, causal_mask_eager),
    (score_mod_3, relative_bias_eager),
    (score_mod_4, relative_bias_v2_eager),
    (score_mod_5, times_two_eager),
    (score_mod_6, alibi_bias_eager),
    (score_mod_7, sliding_window_eager),
    (score_mod_8, block_diagonal_eager),
    (score_mod_9, causal_mask_v2_eager),
]
# Test pairs with aux_tensors: (cute_jit_function, eager_reference_function_factory)
TEST_PAIRS_WITH_AUX_TENSORS = [
    (score_mod_10, batch_bias),
    (score_mod_11, dual_buffer_bias),
]
# Test pairs to compare vectorized score_mods: (cute_jit_function, cute_jit_function_vectorized)
TEST_PAIRS_VECTORIZED = [
    (score_mod_1, score_mod_1_vectorized),
    (score_mod_2, score_mod_2_vectorized),
    (score_mod_3, score_mod_3_vectorized),
    (score_mod_4, score_mod_4_vectorized),
    (score_mod_5, score_mod_5_vectorized),
    (score_mod_6, score_mod_6_vectorized),
]
TEST_PAIRS_WITH_AUX_TENSORS_VECTORIZED = [
    (score_mod_10, score_mod_10_vectorized),
    (score_mod_11, score_mod_11_vectorized),
]
# (seqlen_q, seqlen_kv) shapes exercised by the tests, covering tiny,
# non-divisible, and multi-tile sequence lengths.
SEQLEN_CONFIGS = [
    (1, 1),
    (64, 128),
    (128, 192),
    (256, 256),
    (239, 1),
    (799, 3),
    (113, 203),
    (113, 128),
    (128, 217),
    (113, 211),
    (108, 256),
    (256, 512),
    (384, 256),
    (640, 128),
    (512, 256),
    (1024, 1024),
    (1023, 1024),
    (1024, 1023),
    (4096, 4096),
    (4224, 4224),
]
# __vec_size__ values used when checking vectorized score_mod equality.
VEC_SIZES_TO_CHECK_EQUALITY = [1, 4]
def create_tensors(
    batch_size=2, num_heads=4, seqlen_q=64, seqlen_kv=64, dim=128, dtype=torch.bfloat16,
    device="cuda",
):
    """Create random q/k/v attention inputs in (batch, heads, seqlen, dim) layout.

    ``device`` generalizes the previously hard-coded "cuda" placement so the
    helper can also build CPU tensors; existing callers are unaffected.
    """
    q = torch.randn(batch_size, num_heads, seqlen_q, dim, device=device, dtype=dtype)
    k = torch.randn(batch_size, num_heads, seqlen_kv, dim, device=device, dtype=dtype)
    v = torch.randn(batch_size, num_heads, seqlen_kv, dim, device=device, dtype=dtype)
    return q, k, v
def run_cute_flash(q, k, v, cute_score_mod, aux_tensors=None, pack_gqa=False) -> torch.Tensor:
    """Run the CuTe flash-attention forward pass; inputs/outputs are (B, H, S, D)."""
    # The kernel expects (B, S, H, D): transpose on the way in and back out.
    qt = q.transpose(1, 2)
    kt = k.transpose(1, 2)
    vt = v.transpose(1, 2)
    result = torch.empty_like(qt)
    _flash_attn_fwd(
        qt,
        kt,
        vt,
        return_lse=True,
        score_mod=cute_score_mod,
        out=result,
        lse=None,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
    )
    return result.transpose(1, 2)
def run_flex_reference(q, k, v, eager_score_mod, dtype=None) -> torch.Tensor:
    """Reference attention via torch flex_attention, optionally upcasting inputs."""
    if dtype is not None:
        q, k, v = (t.to(dtype) for t in (q, k, v))
    # Differing head counts between q and k imply grouped-query attention.
    enable_gqa = q.shape[1] != k.shape[1]
    return flex_attention(q, k, v, score_mod=eager_score_mod, enable_gqa=enable_gqa)
@pytest.mark.parametrize("seqlen_q,seqlen_kv", SEQLEN_CONFIGS)
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 2), (4, 2)])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("score_mod_pair", TEST_PAIRS)
def test_cute_vs_flex_attention(
    seqlen_q, seqlen_kv, qhead_per_kvhead, num_kv_heads, dtype, score_mod_pair
):
    """Compare the CuTe kernel against flex_attention for each score_mod.

    The CuTe output error (vs an fp32 flex reference) must stay within
    ``rtol`` times the low-precision PyTorch reference's own error, plus a
    dynamically measured fp32 rounding floor.
    """
    torch.random.manual_seed(42)
    cute_score_mod, eager_score_mod = score_mod_pair
    num_q_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    q, k, v = create_tensors(
        seqlen_q=seqlen_q, seqlen_kv=seqlen_kv, num_heads=num_q_heads, dtype=dtype
    )
    if pack_gqa:
        # GQA mode: keep only num_kv_heads K/V heads.
        k = k[:, :num_kv_heads, :, :].clone()
        v = v[:, :num_kv_heads, :, :].clone()
    out_ref_fp32 = run_flex_reference(q, k, v, eager_score_mod, dtype=torch.float32)
    out_pt = run_flex_reference(q, k, v, eager_score_mod)
    out_cute = run_cute_flash(q, k, v, cute_score_mod, pack_gqa=pack_gqa)
    # Basic shape and NaN checks
    assert out_cute.shape == out_ref_fp32.shape == out_pt.shape
    assert not torch.isnan(out_cute).any()
    assert not torch.isnan(out_ref_fp32).any()
    assert not torch.isnan(out_pt).any()
    assert torch.isfinite(out_cute).all()
    assert torch.isfinite(out_ref_fp32).all()
    assert torch.isfinite(out_pt).all()
    # Numerical error if we just do any arithmetic on out_ref
    fwd_atol = 2 * (out_ref_fp32 + 0.3 - 0.3 - out_ref_fp32).abs().max().item()
    rtol = 2
    # Calculate actual errors
    pt_error = (out_pt - out_ref_fp32).abs().max().item()
    cute_error = (out_cute - out_ref_fp32).abs().max().item()
    print(f"\nNumerical comparison for {cute_score_mod.__name__}:")
    print(f" PyTorch vs FP32 ref max error: {pt_error:.2e}")
    print(f" CuTE vs FP32 ref max error: {cute_error:.2e}")
    print(f" Dynamic absolute tolerance: {fwd_atol:.2e}")
    print(f" Error ratio (CuTE/PyTorch): {cute_error / max(pt_error, 1e-10):.2f}")
    # Assert that CuTE's error is at most rtol times PyTorch's error + fwd_atol
    assert cute_error <= rtol * pt_error + fwd_atol, (
        f"CuTE error {cute_error:.2e} exceeds {rtol}x PyTorch error {pt_error:.2e} + {fwd_atol:.2e}"
    )
@pytest.mark.parametrize("seqlen_q,seqlen_kv", SEQLEN_CONFIGS)
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 1), (4, 2)])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("score_mod_vec_pair", TEST_PAIRS_VECTORIZED)
def test_cute_score_mod_vectorized(
    seqlen_q,
    seqlen_kv,
    qhead_per_kvhead,
    num_kv_heads,
    dtype,
    score_mod_vec_pair,
):
    """Tests equality between original and vectorized versions of score mods.

    The vectorized score_mod must produce bitwise-identical output for every
    vector width in VEC_SIZES_TO_CHECK_EQUALITY.
    """
    torch.random.manual_seed(42)
    cute_score_mod, cute_vectorized_score_mod = score_mod_vec_pair
    num_q_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    q, k, v = create_tensors(
        seqlen_q=seqlen_q, seqlen_kv=seqlen_kv, num_heads=num_q_heads, dtype=dtype
    )
    if pack_gqa:
        # GQA mode: keep only num_kv_heads K/V heads.
        k = k[:, :num_kv_heads, :, :].clone()
        v = v[:, :num_kv_heads, :, :].clone()
    # The non-vectorized score_mod output is the ground truth.
    out_ref = run_cute_flash(q, k, v, cute_score_mod, pack_gqa=pack_gqa)
    for vec_size in VEC_SIZES_TO_CHECK_EQUALITY:
        # __vec_size__ is read when the kernel is built for this score_mod.
        cute_vectorized_score_mod.__vec_size__ = vec_size
        out = run_cute_flash(q, k, v, cute_vectorized_score_mod, pack_gqa=pack_gqa)
        assert torch.equal(out, out_ref)
@pytest.mark.parametrize("seqlen_q,seqlen_kv", SEQLEN_CONFIGS)
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 1), (4, 2)])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("score_mod_pair", TEST_PAIRS_WITH_AUX_TENSORS)
def test_cute_vs_flex_attention_with_aux_tensors(
    seqlen_q, seqlen_kv, qhead_per_kvhead, num_kv_heads, dtype, score_mod_pair
):
    """Compare CuTe vs flex_attention for score_mods that read aux tensors.

    Each aux score_mod gets its required side buffers (per-batch bias, or
    per-head bias + per-position scale); the error bound mirrors
    test_cute_vs_flex_attention.
    """
    torch.random.manual_seed(42)
    cute_score_mod, eager_score_mod_factory = score_mod_pair
    batch_size = 2
    num_q_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    q, k, v = create_tensors(
        batch_size=batch_size,
        seqlen_q=seqlen_q,
        seqlen_kv=seqlen_kv,
        num_heads=num_q_heads,
        dtype=dtype,
    )
    if pack_gqa:
        # GQA mode: keep only num_kv_heads K/V heads.
        k = k[:, :num_kv_heads, :, :].clone()
        v = v[:, :num_kv_heads, :, :].clone()
    if cute_score_mod == score_mod_10:
        # Per-batch additive bias.
        buffer = torch.randn(batch_size, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [buffer]
        eager_score_mod = eager_score_mod_factory(buffer)
        assert buffer.shape == (batch_size,)
    elif cute_score_mod == score_mod_11:
        # Per-head bias plus per-query-position scale.
        head_bias = torch.randn(num_q_heads, device="cuda", dtype=dtype) * 0.2
        pos_scale = torch.arange(seqlen_q, device="cuda", dtype=dtype) * 0.01
        aux_tensors = [head_bias, pos_scale]
        eager_score_mod = eager_score_mod_factory(head_bias, pos_scale)
        assert head_bias.shape == (num_q_heads,)
        assert pos_scale.shape == (seqlen_q,)
    out_ref_fp32 = run_flex_reference(q, k, v, eager_score_mod, dtype=torch.float32)
    out_pt = run_flex_reference(q, k, v, eager_score_mod)
    out_cute = run_cute_flash(q, k, v, cute_score_mod, aux_tensors=aux_tensors, pack_gqa=pack_gqa)
    # Basic shape and NaN checks
    assert out_cute.shape == out_ref_fp32.shape == out_pt.shape
    assert not torch.isnan(out_cute).any()
    assert not torch.isnan(out_ref_fp32).any()
    assert not torch.isnan(out_pt).any()
    assert torch.isfinite(out_cute).all()
    assert torch.isfinite(out_ref_fp32).all()
    assert torch.isfinite(out_pt).all()
    # Numerical error if we just do any arithmetic on out_ref
    fwd_atol = 2 * (out_ref_fp32 + 0.3 - 0.3 - out_ref_fp32).abs().max().item()
    rtol = 2
    # Calculate actual errors
    pt_error = (out_pt - out_ref_fp32).abs().max().item()
    cute_error = (out_cute - out_ref_fp32).abs().max().item()
    print(f"\nNumerical comparison for {cute_score_mod.__name__}:")
    print(f" PyTorch vs FP32 ref max error: {pt_error:.2e}")
    print(f" CuTE vs FP32 ref max error: {cute_error:.2e}")
    print(f" Dynamic absolute tolerance: {fwd_atol:.2e}")
    print(f" Error ratio (CuTE/PyTorch): {cute_error / max(pt_error, 1e-10):.2f}")
    # Assert that CuTE's error is at most rtol times PyTorch's error + fwd_atol
    assert cute_error <= rtol * pt_error + fwd_atol, (
        f"CuTE error {cute_error:.2e} exceeds {rtol}x PyTorch error {pt_error:.2e} + {fwd_atol:.2e}"
    )
@pytest.mark.parametrize("seqlen_q,seqlen_kv", SEQLEN_CONFIGS)
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 1), (4, 2)])
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("score_mod_vec_pair", TEST_PAIRS_WITH_AUX_TENSORS_VECTORIZED)
def test_cute_score_mod_with_aux_tensors_vectorized(
    seqlen_q,
    seqlen_kv,
    qhead_per_kvhead,
    num_kv_heads,
    dtype,
    score_mod_vec_pair,
):
    """Tests equality between original and vectorized versions of score mods.

    Same idea as test_cute_score_mod_vectorized, but for score_mods that read
    aux tensors: output must be bitwise equal for every vector width.
    """
    torch.random.manual_seed(42)
    cute_score_mod, cute_vectorized_score_mod = score_mod_vec_pair
    batch_size = 2
    num_q_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    q, k, v = create_tensors(
        seqlen_q=seqlen_q, seqlen_kv=seqlen_kv, num_heads=num_q_heads, dtype=dtype
    )
    if pack_gqa:
        # GQA mode: keep only num_kv_heads K/V heads.
        k = k[:, :num_kv_heads, :, :].clone()
        v = v[:, :num_kv_heads, :, :].clone()
    if cute_score_mod == score_mod_10:
        # Per-batch additive bias.
        buffer = torch.randn(batch_size, device="cuda", dtype=dtype) * 0.1
        aux_tensors = [buffer]
        assert buffer.shape == (batch_size,)
    elif cute_score_mod == score_mod_11:
        # Per-head bias plus per-query-position scale.
        head_bias = torch.randn(num_q_heads, device="cuda", dtype=dtype) * 0.2
        pos_scale = torch.arange(seqlen_q, device="cuda", dtype=dtype) * 0.01
        aux_tensors = [head_bias, pos_scale]
        assert head_bias.shape == (num_q_heads,)
        assert pos_scale.shape == (seqlen_q,)
    # The non-vectorized score_mod output is the ground truth.
    out_ref = run_cute_flash(q, k, v, cute_score_mod, aux_tensors=aux_tensors, pack_gqa=pack_gqa)
    for vec_size in VEC_SIZES_TO_CHECK_EQUALITY:
        # __vec_size__ is read when the kernel is built for this score_mod.
        cute_vectorized_score_mod.__vec_size__ = vec_size
        out = run_cute_flash(
            q, k, v, cute_vectorized_score_mod, aux_tensors=aux_tensors, pack_gqa=pack_gqa
        )
        assert torch.equal(out, out_ref)
def _generate_block_kvcache(seqlen_k, page_size, batch_size, nheads_k, d, device, dtype):
    """Build a random paged KV cache plus the contiguous caches it encodes.

    Returns (k_cache, v_cache, page_table, k_cache_paged, v_cache_paged,
    num_blocks), where k_cache/v_cache are the gathered contiguous caches in
    (batch, heads, seqlen, dim) layout and page_table[b] lists the block
    indices backing batch b in order.

    Improvement: the einops rearranges were replaced by the equivalent
    torch reshapes ("(b n) -> b n" == reshape(b, -1)), dropping the
    function-local third-party dependency; .long() makes the page gather
    index dtype portable across torch versions.
    """
    import math

    # Over-allocate blocks 3x so page indices are scattered across the pool.
    num_blocks = math.ceil(seqlen_k / page_size) * batch_size * 3
    k_cache_paged = torch.randn(num_blocks, page_size, nheads_k, d, device=device, dtype=dtype)
    v_cache_paged = torch.randn(num_blocks, page_size, nheads_k, d, device=device, dtype=dtype)
    # Random page assignment: one row of block indices per batch element.
    page_table = torch.randperm(num_blocks, dtype=torch.int32, device=device).reshape(
        batch_size, -1
    )

    def _gather(paged):
        # Gather each batch's pages and flatten them into one contiguous
        # (batch, seqlen, heads, dim) sequence, trimmed to seqlen_k.
        gathered = paged[page_table.flatten().long()]
        return gathered.reshape(batch_size, -1, nheads_k, d)[:, :seqlen_k]

    k_cache_bshd = _gather(k_cache_paged)
    v_cache_bshd = _gather(v_cache_paged)
    # Return the contiguous views in (batch, heads, seqlen, dim) layout.
    k_cache = k_cache_bshd.transpose(1, 2)
    v_cache = v_cache_bshd.transpose(1, 2)
    return k_cache, v_cache, page_table, k_cache_paged, v_cache_paged, num_blocks
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("page_size", [None, 1, 4, 128])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 2), (4, 2)])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_kv",
    [
        (1, 128),
        (64, 256),
        (64, 800),
        (256, 256),
        (113, 203),
    ],
)
@pytest.mark.parametrize("score_mod_pair", TEST_PAIRS)
@pytest.mark.skipif(COMPUTE_CAPABILITY != 10, reason="Paged KV cache only supported on SM100")
def test_score_mod_with_paged_kvcache(
    seqlen_q,
    seqlen_kv,
    qhead_per_kvhead,
    num_kv_heads,
    page_size,
    dtype,
    score_mod_pair,
):
    """Score mods against a flex_attention reference with a KV cache that is
    paged and/or shorter than its allocation (seqused_k / page_table paths).

    page_size=None exercises the plain seqused_k path; otherwise the cache is
    scattered across pages by _generate_block_kvcache.
    """
    # NOTE: the skipif above already restricts this test to SM100; no extra
    # per-capability branching is needed in the body.
    if page_size is not None and seqlen_kv % page_size != 0:
        pytest.skip()
    torch.random.manual_seed(42)
    cute_score_mod, eager_score_mod = score_mod_pair
    batch_size = 2
    num_q_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    dim = 128
    device = "cuda"
    q = torch.randn(batch_size, num_q_heads, seqlen_q, dim, device=device, dtype=dtype)
    if page_size is None:
        k_cache = torch.randn(batch_size, num_kv_heads, seqlen_kv, dim, device=device, dtype=dtype)
        v_cache = torch.randn(batch_size, num_kv_heads, seqlen_kv, dim, device=device, dtype=dtype)
        page_table = None
        k_cache_paged = None
        v_cache_paged = None
    else:
        (
            k_cache,
            v_cache,
            page_table,
            k_cache_paged,
            v_cache_paged,
            _num_blocks,
        ) = _generate_block_kvcache(
            seqlen_kv, page_size, batch_size, num_kv_heads, dim, device, dtype
        )
    # Per-batch "used" KV length; keys beyond it must be masked out.
    cache_seqlens = torch.randint(1, seqlen_kv + 1, (batch_size,), dtype=torch.int32, device=device)
    # The flex reference runs on densely repeated KV heads (no pack-GQA there).
    if pack_gqa:
        k_cache_rep = k_cache.repeat_interleave(qhead_per_kvhead, dim=1)
        v_cache_rep = v_cache.repeat_interleave(qhead_per_kvhead, dim=1)
    else:
        k_cache_rep = k_cache
        v_cache_rep = v_cache

    def make_masked_score_mod(base_score_mod, seqused_k_tensor):
        """Wrap a score mod so keys at or past cache_seqlens[b] score -inf."""
        seqused_k_dev = seqused_k_tensor

        def masked_score_mod(score, b, h, q_idx, kv_idx):
            if base_score_mod is not None:
                score = base_score_mod(score, b, h, q_idx, kv_idx)
            seqlen_limit = torch.gather(seqused_k_dev, 0, b.long())
            valid_mask = kv_idx < seqlen_limit
            return torch.where(valid_mask, score, torch.full_like(score, float("-inf")))

        return masked_score_mod

    # A single closure serves both the fp32 and native-dtype reference runs
    # (the previous two copies were byte-identical).
    masked_score_mod = make_masked_score_mod(eager_score_mod, cache_seqlens)
    out_ref_fp32 = run_flex_reference(
        q, k_cache_rep, v_cache_rep, masked_score_mod, dtype=torch.float32
    )
    out_pt = run_flex_reference(q, k_cache_rep, v_cache_rep, masked_score_mod)
    q_bshd = q.transpose(1, 2)
    out_cute = torch.empty_like(q_bshd)
    if page_size is None:
        k_bshd = k_cache.transpose(1, 2)
        v_bshd = v_cache.transpose(1, 2)
        _flash_attn_fwd(
            q_bshd,
            k_bshd,
            v_bshd,
            seqused_k=cache_seqlens,
            return_lse=True,
            score_mod=cute_score_mod,
            out=out_cute,
            lse=None,
            pack_gqa=pack_gqa,
        )
    else:
        _flash_attn_fwd(
            q_bshd,
            k_cache_paged,
            v_cache_paged,
            seqused_k=cache_seqlens,
            page_table=page_table,
            return_lse=True,
            score_mod=cute_score_mod,
            out=out_cute,
            lse=None,
            pack_gqa=pack_gqa,
        )
    out_cute = out_cute.transpose(1, 2)
    assert out_cute.shape == out_ref_fp32.shape == out_pt.shape
    assert not torch.isnan(out_cute).any()
    assert not torch.isnan(out_ref_fp32).any()
    assert not torch.isnan(out_pt).any()
    assert torch.isfinite(out_cute).all()
    assert torch.isfinite(out_ref_fp32).all()
    assert torch.isfinite(out_pt).all()
    # Dynamic tolerance: twice the round-off observed when nudging the fp32
    # reference, so the bound scales with the output's magnitude.
    fwd_atol = 2 * (out_ref_fp32 + 0.3 - 0.3 - out_ref_fp32).abs().max().item()
    rtol = 2
    pt_error = (out_pt - out_ref_fp32).abs().max().item()
    cute_error = (out_cute - out_ref_fp32).abs().max().item()
    print(f"\nNumerical comparison for {cute_score_mod.__name__} (paged={page_size is not None}):")
    print(f" PyTorch vs FP32 ref max error: {pt_error:.2e}")
    print(f" CuTE vs FP32 ref max error: {cute_error:.2e}")
    print(f" Dynamic absolute tolerance: {fwd_atol:.2e}")
    print(f" Error ratio (CuTE/PyTorch): {cute_error / max(pt_error, 1e-10):.2f}")
    # CuTE's error may be at most rtol times the PyTorch error plus round-off.
    assert cute_error <= rtol * pt_error + fwd_atol, (
        f"CuTE error {cute_error:.2e} exceeds {rtol}x PyTorch error {pt_error:.2e} + {fwd_atol:.2e}"
    )
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("page_size", [None, 128])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(1, 1), (4, 2)])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_kv",
    [
        (64, 128),
        (128, 256),
        (256, 256),
    ],
)
@pytest.mark.parametrize("score_mod_pair", TEST_PAIRS_WITH_AUX_TENSORS)
@pytest.mark.skipif(COMPUTE_CAPABILITY != 10, reason="Paged KV cache only supported on SM100")
def test_score_mod_with_paged_kvcache_aux_tensors(
    seqlen_q,
    seqlen_kv,
    qhead_per_kvhead,
    num_kv_heads,
    page_size,
    dtype,
    score_mod_pair,
):
    """Like test_score_mod_with_paged_kvcache, but for score mods that read
    auxiliary tensors (per-batch / per-head / per-position buffers)."""
    # NOTE: the skipif above already restricts this test to SM100.
    if page_size is not None and seqlen_kv % page_size != 0:
        pytest.skip()
    torch.random.manual_seed(42)
    cute_score_mod, eager_score_mod_factory = score_mod_pair
    batch_size = 2
    num_q_heads = num_kv_heads * qhead_per_kvhead
    pack_gqa = qhead_per_kvhead > 1
    dim = 128
    device = "cuda"
    q = torch.randn(batch_size, num_q_heads, seqlen_q, dim, device=device, dtype=dtype)
    if page_size is None:
        k_cache = torch.randn(batch_size, num_kv_heads, seqlen_kv, dim, device=device, dtype=dtype)
        v_cache = torch.randn(batch_size, num_kv_heads, seqlen_kv, dim, device=device, dtype=dtype)
        page_table = None
        k_cache_paged = None
        v_cache_paged = None
    else:
        (
            k_cache,
            v_cache,
            page_table,
            k_cache_paged,
            v_cache_paged,
            _num_blocks,
        ) = _generate_block_kvcache(
            seqlen_kv, page_size, batch_size, num_kv_heads, dim, device, dtype
        )
    cache_seqlens = torch.randint(1, seqlen_kv + 1, (batch_size,), dtype=torch.int32, device=device)
    # Build the aux tensors and matching eager reference for this score mod;
    # fail loudly on an unhandled mod instead of hitting an UnboundLocalError.
    if cute_score_mod == score_mod_10:
        buffer = torch.randn(batch_size, device=device, dtype=dtype) * 0.1
        aux_tensors = [buffer]
        eager_score_mod = eager_score_mod_factory(buffer)
    elif cute_score_mod == score_mod_11:
        head_bias = torch.randn(num_q_heads, device=device, dtype=dtype) * 0.2
        pos_scale = torch.arange(seqlen_q, device=device, dtype=dtype) * 0.01
        aux_tensors = [head_bias, pos_scale]
        eager_score_mod = eager_score_mod_factory(head_bias, pos_scale)
    else:
        pytest.fail(f"no aux-tensor setup defined for {cute_score_mod.__name__}")
    # The flex reference runs on densely repeated KV heads (no pack-GQA there).
    if pack_gqa:
        k_cache_rep = k_cache.repeat_interleave(qhead_per_kvhead, dim=1)
        v_cache_rep = v_cache.repeat_interleave(qhead_per_kvhead, dim=1)
    else:
        k_cache_rep = k_cache
        v_cache_rep = v_cache

    def make_masked_score_mod(base_score_mod, seqused_k_tensor):
        """Wrap a score mod so keys at or past cache_seqlens[b] score -inf."""
        seqused_k_dev = seqused_k_tensor

        def masked_score_mod(score, b, h, q_idx, kv_idx):
            if base_score_mod is not None:
                score = base_score_mod(score, b, h, q_idx, kv_idx)
            seqlen_limit = torch.gather(seqused_k_dev, 0, b.long())
            valid_mask = kv_idx < seqlen_limit
            return torch.where(valid_mask, score, torch.full_like(score, float("-inf")))

        return masked_score_mod

    # A single closure serves both reference runs (the two copies were identical).
    masked_score_mod = make_masked_score_mod(eager_score_mod, cache_seqlens)
    out_ref_fp32 = run_flex_reference(
        q, k_cache_rep, v_cache_rep, masked_score_mod, dtype=torch.float32
    )
    out_pt = run_flex_reference(q, k_cache_rep, v_cache_rep, masked_score_mod)
    q_bshd = q.transpose(1, 2)
    out_cute = torch.empty_like(q_bshd)
    if page_size is None:
        k_bshd = k_cache.transpose(1, 2)
        v_bshd = v_cache.transpose(1, 2)
        _flash_attn_fwd(
            q_bshd,
            k_bshd,
            v_bshd,
            seqused_k=cache_seqlens,
            return_lse=True,
            score_mod=cute_score_mod,
            out=out_cute,
            lse=None,
            aux_tensors=aux_tensors,
            pack_gqa=pack_gqa,
        )
    else:
        _flash_attn_fwd(
            q_bshd,
            k_cache_paged,
            v_cache_paged,
            seqused_k=cache_seqlens,
            page_table=page_table,
            return_lse=True,
            score_mod=cute_score_mod,
            out=out_cute,
            lse=None,
            aux_tensors=aux_tensors,
            pack_gqa=pack_gqa,
        )
    out_cute = out_cute.transpose(1, 2)
    assert out_cute.shape == out_ref_fp32.shape == out_pt.shape
    assert not torch.isnan(out_cute).any()
    assert not torch.isnan(out_ref_fp32).any()
    assert not torch.isnan(out_pt).any()
    assert torch.isfinite(out_cute).all()
    assert torch.isfinite(out_ref_fp32).all()
    assert torch.isfinite(out_pt).all()
    # Dynamic tolerance scaled to the fp32 reference's own round-off.
    fwd_atol = 2 * (out_ref_fp32 + 0.3 - 0.3 - out_ref_fp32).abs().max().item()
    rtol = 2
    pt_error = (out_pt - out_ref_fp32).abs().max().item()
    cute_error = (out_cute - out_ref_fp32).abs().max().item()
    print(f"\nNumerical comparison for {cute_score_mod.__name__} (paged={page_size is not None}):")
    print(f" PyTorch vs FP32 ref max error: {pt_error:.2e}")
    print(f" CuTE vs FP32 ref max error: {cute_error:.2e}")
    print(f" Dynamic absolute tolerance: {fwd_atol:.2e}")
    print(f" Error ratio (CuTE/PyTorch): {cute_error / max(pt_error, 1e-10):.2f}")
    assert cute_error <= rtol * pt_error + fwd_atol, (
        f"CuTE error {cute_error:.2e} exceeds {rtol}x PyTorch error {pt_error:.2e} + {fwd_atol:.2e}"
    )
@cute.jit
def score_mod_bwd_5(grad, score, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Backward for score_mod_5 (times_two): d(score*2)/d(score) = 2."""
    # Scale the incoming gradient by the constant derivative 2.
    return grad * cute.full_like(grad, 2.0)
@cute.jit
def score_mod_bwd_3(grad, score, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Backward for score_mod_3 (relative_bias): d(score + |q-kv|)/d(score) = 1."""
    # The bias is additive and independent of score, so grad passes through.
    return grad
@cute.jit
def score_mod_bwd_identity(grad, score, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Pass-through backward: derivative 1 w.r.t. score.

    Used for the additive aux-tensor mods (batch_bias, dual_buffer_bias), whose
    bias terms do not depend on the score.
    """
    return grad
@cute.jit
def score_mod_bwd_causal(grad, score, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Backward for causal masking: d(where(mask, score, -inf))/d(score) = where(mask, 1, 0).
    At unmasked positions (q_idx >= kv_idx), grad passes through.
    At masked positions (q_idx < kv_idx), the kernel already zeros grad because P=0.
    """
    return grad
@cute.jit
def score_mod_squared(tSrS_ssa, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Forward: score ** 2 (non-linear mod, exercises the score-dependent bwd path)."""
    return tSrS_ssa * tSrS_ssa
@cute.jit
def score_mod_bwd_squared(grad, score, b_idx, h_idx, q_idx, kv_idx, seqlen_info, aux_tensors):
    """Backward for score**2: d(score**2)/d(score) = 2*score."""
    # chain rule: incoming grad times 2 * (pre-mod) score
    return grad * cute.full_like(grad, 2.0) * score
def score_squared_eager(score, b, h, q_idx, kv_idx):
    """Eager flex_attention reference for score_mod_squared: elementwise square."""
    squared = score * score
    return squared
# (forward CuTE score mod, matching CuTE backward score mod, eager reference)
# triples for the backward tests.
BWD_TEST_PAIRS = [
    (score_mod_5, score_mod_bwd_5, times_two_eager),
    (score_mod_3, score_mod_bwd_3, relative_bias_eager),
    (score_mod_squared, score_mod_bwd_squared, score_squared_eager),
    (score_mod_2, score_mod_bwd_causal, causal_mask_eager),
]
# Aux-tensor variants: the third element is a factory that takes the aux
# buffers and returns the eager reference score mod.
BWD_TEST_PAIRS_WITH_AUX = [
    (score_mod_10, score_mod_bwd_identity, batch_bias),
    (score_mod_11, score_mod_bwd_identity, dual_buffer_bias),
]
# Subset additionally exercised with pack_gqa=True.
BWD_TEST_PAIRS_PACK_GQA = [
    (score_mod_5, score_mod_bwd_5, times_two_eager),
    (score_mod_3, score_mod_bwd_3, relative_bias_eager),
]
def run_cute_flash_bwd(
    q, k, v, cute_score_mod, cute_score_mod_bwd, aux_tensors=None, pack_gqa=False
):
    """Run flash attention forward + backward with score_mod.

    Inputs are (batch, heads, seq, dim); the kernels consume
    (batch, seq, heads, dim), so tensors are transposed on the way in and the
    outputs transposed back before returning
    (out, grad_out, dq, dk, dv) in the caller's layout.
    """
    q_bshd, k_bshd, v_bshd = (t.transpose(1, 2) for t in (q, k, v))
    out, lse = _flash_attn_fwd(
        q_bshd,
        k_bshd,
        v_bshd,
        return_lse=True,
        score_mod=cute_score_mod,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
    )
    # Random upstream gradient, generated here so callers can reuse it for the
    # reference backward.
    grad_out = torch.randn_like(out)
    dq, dk, dv = _flash_attn_bwd(
        q_bshd,
        k_bshd,
        v_bshd,
        out,
        grad_out,
        lse,
        score_mod=cute_score_mod,
        score_mod_bwd=cute_score_mod_bwd,
        aux_tensors=aux_tensors,
        pack_gqa=pack_gqa,
    )
    to_bhsd = lambda t: t.transpose(1, 2)
    return to_bhsd(out), to_bhsd(grad_out), to_bhsd(dq), to_bhsd(dk), to_bhsd(dv)
def run_flex_reference_bwd(q, k, v, eager_score_mod, grad_out, dtype=None):
    """Run flex_attention forward + backward for reference.

    When *dtype* is given, q/k/v and grad_out are cast to it first; either way
    q/k/v are marked as requiring grad so dq/dk/dv can be extracted.
    Returns (out, dq, dk, dv).
    """
    if dtype is None:
        q = q.requires_grad_(True)
        k = k.requires_grad_(True)
        v = v.requires_grad_(True)
    else:
        q, k, v = (t.to(dtype).requires_grad_(True) for t in (q, k, v))
        grad_out = grad_out.to(dtype)
    compiled_flex = torch.compile(flex_attention)
    # GQA is enabled whenever the query and key head counts differ.
    enable_gqa = q.shape[1] != k.shape[1]
    out = compiled_flex(q, k, v, score_mod=eager_score_mod, enable_gqa=enable_gqa)
    dq, dk, dv = torch.autograd.grad(out, (q, k, v), grad_out)
    return out, dq, dk, dv
@pytest.mark.parametrize(
    "seqlen_q,seqlen_kv",
    [
        (64, 64),
        (128, 128),
        (256, 256),
        (512, 512),
        (799, 3),
        (3, 799),
        (128, 256),
        (256, 128),
        (113, 203),
    ],
)
@pytest.mark.parametrize("dim", [64, 128])
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16])
@pytest.mark.parametrize("score_mod_triple", BWD_TEST_PAIRS)
def test_cute_vs_flex_attention_backward(seqlen_q, seqlen_kv, dim, dtype, score_mod_triple):
    """Test backward pass with score_mod against flex_attention reference.

    dq/dk/dv from the CuTE kernel must match the fp32 flex_attention gradients
    at least as well (within rtol x PT error + atol) as a same-dtype PyTorch
    run does.
    """
    if COMPUTE_CAPABILITY == 9 and dim == 64:
        pytest.skip("head_dim=64 not supported on SM90 for backward")
    torch.random.manual_seed(42)
    cute_fwd, cute_bwd, eager_ref = score_mod_triple
    q, k, v = create_tensors(
        seqlen_q=seqlen_q, seqlen_kv=seqlen_kv, num_heads=4, dim=dim, dtype=dtype
    )
    # grad_out is generated inside run_cute_flash_bwd and reused for the refs.
    out_cute, grad_out, dq_cute, dk_cute, dv_cute = run_cute_flash_bwd(q, k, v, cute_fwd, cute_bwd)
    out_ref_fp32, dq_ref_fp32, dk_ref_fp32, dv_ref_fp32 = run_flex_reference_bwd(
        q, k, v, eager_ref, grad_out, dtype=torch.float32
    )
    out_pt, dq_pt, dk_pt, dv_pt = run_flex_reference_bwd(q, k, v, eager_ref, grad_out)
    assert not torch.isnan(dq_cute).any(), "dQ contains NaN"
    assert not torch.isnan(dk_cute).any(), "dK contains NaN"
    assert not torch.isnan(dv_cute).any(), "dV contains NaN"
    rtol = 2
    # Dynamic absolute tolerance: twice the round-off observed when nudging the
    # fp32 reference, so the bound scales with gradient magnitude.
    dq_atol = 2 * (dq_ref_fp32 + 0.3 - 0.3 - dq_ref_fp32).abs().max().item()
    dk_atol = 2 * (dk_ref_fp32 + 0.3 - 0.3 - dk_ref_fp32).abs().max().item()
    dv_atol = 2 * (dv_ref_fp32 + 0.3 - 0.3 - dv_ref_fp32).abs().max().item()
    # Compare everything in the test dtype, matching the kernel outputs.
    dq_ref = dq_ref_fp32.to(dtype)
    dk_ref = dk_ref_fp32.to(dtype)
    dv_ref = dv_ref_fp32.to(dtype)
    pt_dq_err = (dq_pt - dq_ref).abs().max().item()
    pt_dk_err = (dk_pt - dk_ref).abs().max().item()
    pt_dv_err = (dv_pt - dv_ref).abs().max().item()
    cute_dq_err = (dq_cute - dq_ref).abs().max().item()
    cute_dk_err = (dk_cute - dk_ref).abs().max().item()
    cute_dv_err = (dv_cute - dv_ref).abs().max().item()
    print(f"\nBackward comparison for {cute_fwd.__name__}:")
    print(f" dQ: PT err={pt_dq_err:.2e}, CuTE err={cute_dq_err:.2e}, atol={dq_atol:.2e}")
    print(f" dK: PT err={pt_dk_err:.2e}, CuTE err={cute_dk_err:.2e}, atol={dk_atol:.2e}")
    print(f" dV: PT err={pt_dv_err:.2e}, CuTE err={cute_dv_err:.2e}, atol={dv_atol:.2e}")
    assert cute_dq_err <= rtol * pt_dq_err + dq_atol, f"dQ error too large: {cute_dq_err:.2e}"
    assert cute_dk_err <= rtol * pt_dk_err + dk_atol, f"dK error too large: {cute_dk_err:.2e}"
    assert cute_dv_err <= rtol * pt_dv_err + dv_atol, f"dV error too large: {cute_dv_err:.2e}"
def make_aux_tensors_for_bwd(cute_score_mod, eager_factory, seqlen_q, num_heads, batch_size, dtype):
    """Build the aux tensors a score mod reads, plus the matching eager reference.

    Returns (aux_tensors, eager_score_mod).
    """
    if cute_score_mod != score_mod_10:
        # Dual-buffer case: per-head bias plus per-query-position scale.
        head_bias = torch.randn(num_heads, device="cuda", dtype=dtype) * 0.2
        pos_scale = torch.arange(seqlen_q, device="cuda", dtype=dtype) * 0.01
        return [head_bias, pos_scale], eager_factory(head_bias, pos_scale)
    # Single-buffer case (score_mod_10): one bias value per batch element.
    per_batch_bias = torch.randn(batch_size, device="cuda", dtype=dtype) * 0.1
    return [per_batch_bias], eager_factory(per_batch_bias)
@pytest.mark.parametrize(
    "seqlen_q,seqlen_kv",
    [
        (64, 64),
        (128, 128),
        (256, 128),
    ],
)
@pytest.mark.parametrize("dim", [64, 128])
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16])
@pytest.mark.parametrize("score_mod_triple", BWD_TEST_PAIRS_WITH_AUX)
def test_cute_vs_flex_attention_backward_with_aux(
    seqlen_q, seqlen_kv, dim, dtype, score_mod_triple
):
    """Backward-pass accuracy for score mods that read auxiliary tensors."""
    if COMPUTE_CAPABILITY == 9 and dim == 64:
        pytest.skip("head_dim=64 not supported on SM90 for backward")
    torch.random.manual_seed(42)
    cute_fwd, cute_bwd, eager_factory = score_mod_triple
    q, k, v = create_tensors(
        seqlen_q=seqlen_q, seqlen_kv=seqlen_kv, num_heads=4, dim=dim, dtype=dtype
    )
    aux_tensors, eager_ref = make_aux_tensors_for_bwd(
        cute_fwd, eager_factory, seqlen_q, q.shape[1], q.shape[0], dtype
    )
    # grad_out is generated inside run_cute_flash_bwd and reused for the refs.
    out_cute, grad_out, dq_cute, dk_cute, dv_cute = run_cute_flash_bwd(
        q, k, v, cute_fwd, cute_bwd, aux_tensors=aux_tensors
    )
    out_ref_fp32, dq_ref_fp32, dk_ref_fp32, dv_ref_fp32 = run_flex_reference_bwd(
        q, k, v, eager_ref, grad_out, dtype=torch.float32
    )
    out_pt, dq_pt, dk_pt, dv_pt = run_flex_reference_bwd(q, k, v, eager_ref, grad_out)
    assert not torch.isnan(dq_cute).any()
    assert not torch.isnan(dk_cute).any()
    assert not torch.isnan(dv_cute).any()
    # NOTE: looser rtol (3 vs 2) than the no-aux backward test.
    rtol = 3
    # Dynamic absolute tolerance scaled to the fp32 reference's own round-off.
    dq_atol = 2 * (dq_ref_fp32 + 0.3 - 0.3 - dq_ref_fp32).abs().max().item()
    dk_atol = 2 * (dk_ref_fp32 + 0.3 - 0.3 - dk_ref_fp32).abs().max().item()
    dv_atol = 2 * (dv_ref_fp32 + 0.3 - 0.3 - dv_ref_fp32).abs().max().item()
    dq_ref = dq_ref_fp32.to(dtype)
    dk_ref = dk_ref_fp32.to(dtype)
    dv_ref = dv_ref_fp32.to(dtype)
    pt_dq_err = (dq_pt - dq_ref).abs().max().item()
    pt_dk_err = (dk_pt - dk_ref).abs().max().item()
    pt_dv_err = (dv_pt - dv_ref).abs().max().item()
    cute_dq_err = (dq_cute - dq_ref).abs().max().item()
    cute_dk_err = (dk_cute - dk_ref).abs().max().item()
    cute_dv_err = (dv_cute - dv_ref).abs().max().item()
    print(f"\nBackward comparison with aux for {cute_fwd.__name__}:")
    print(f" dQ: PT err={pt_dq_err:.2e}, CuTE err={cute_dq_err:.2e}, atol={dq_atol:.2e}")
    print(f" dK: PT err={pt_dk_err:.2e}, CuTE err={cute_dk_err:.2e}, atol={dk_atol:.2e}")
    print(f" dV: PT err={pt_dv_err:.2e}, CuTE err={cute_dv_err:.2e}, atol={dv_atol:.2e}")
    assert cute_dq_err <= rtol * pt_dq_err + dq_atol, f"dQ error too large: {cute_dq_err:.2e}"
    assert cute_dk_err <= rtol * pt_dk_err + dk_atol, f"dK error too large: {cute_dk_err:.2e}"
    assert cute_dv_err <= rtol * pt_dv_err + dv_atol, f"dV error too large: {cute_dv_err:.2e}"
@pytest.mark.parametrize("seqlen_q,seqlen_kv", [(128, 128), (128, 256)])
@pytest.mark.parametrize("dim", [64, 128])
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16])
@pytest.mark.parametrize("qhead_per_kvhead,num_kv_heads", [(4, 2)])
@pytest.mark.parametrize("score_mod_triple", BWD_TEST_PAIRS_PACK_GQA)
def test_cute_vs_flex_attention_backward_pack_gqa(
    seqlen_q, seqlen_kv, dim, dtype, qhead_per_kvhead, num_kv_heads, score_mod_triple
):
    """Backward-pass accuracy with pack_gqa: K/V carry fewer heads than Q and
    the kernel packs the grouped query heads itself; the flex reference handles
    the mismatch via enable_gqa."""
    if COMPUTE_CAPABILITY == 9:
        pytest.xfail("pack_gqa backward not yet implemented on SM90")
    torch.random.manual_seed(42)
    cute_fwd, cute_bwd, eager_ref = score_mod_triple
    num_q_heads = num_kv_heads * qhead_per_kvhead
    q, k, v = create_tensors(
        seqlen_q=seqlen_q, seqlen_kv=seqlen_kv, num_heads=num_q_heads, dim=dim, dtype=dtype
    )
    # Shrink K/V to the KV-head count so the kernel performs GQA packing.
    k = k[:, :num_kv_heads, :, :].clone()
    v = v[:, :num_kv_heads, :, :].clone()
    out_cute, grad_out, dq_cute, dk_cute, dv_cute = run_cute_flash_bwd(
        q, k, v, cute_fwd, cute_bwd, pack_gqa=True
    )
    out_ref_fp32, dq_ref_fp32, dk_ref_fp32, dv_ref_fp32 = run_flex_reference_bwd(
        q, k, v, eager_ref, grad_out, dtype=torch.float32
    )
    out_pt, dq_pt, dk_pt, dv_pt = run_flex_reference_bwd(q, k, v, eager_ref, grad_out)
    assert not torch.isnan(dq_cute).any()
    assert not torch.isnan(dk_cute).any()
    assert not torch.isnan(dv_cute).any()
    rtol = 3
    # Dynamic absolute tolerance scaled to the fp32 reference's own round-off.
    dq_atol = 2 * (dq_ref_fp32 + 0.3 - 0.3 - dq_ref_fp32).abs().max().item()
    dk_atol = 2 * (dk_ref_fp32 + 0.3 - 0.3 - dk_ref_fp32).abs().max().item()
    dv_atol = 2 * (dv_ref_fp32 + 0.3 - 0.3 - dv_ref_fp32).abs().max().item()
    dq_ref = dq_ref_fp32.to(dtype)
    dk_ref = dk_ref_fp32.to(dtype)
    dv_ref = dv_ref_fp32.to(dtype)
    pt_dq_err = (dq_pt - dq_ref).abs().max().item()
    pt_dk_err = (dk_pt - dk_ref).abs().max().item()
    pt_dv_err = (dv_pt - dv_ref).abs().max().item()
    cute_dq_err = (dq_cute - dq_ref).abs().max().item()
    cute_dk_err = (dk_cute - dk_ref).abs().max().item()
    cute_dv_err = (dv_cute - dv_ref).abs().max().item()
    print(f"\nBackward Pack-GQA comparison for {cute_fwd.__name__}:")
    print(f" dQ: PT err={pt_dq_err:.2e}, CuTE err={cute_dq_err:.2e}, atol={dq_atol:.2e}")
    print(f" dK: PT err={pt_dk_err:.2e}, CuTE err={cute_dk_err:.2e}, atol={dk_atol:.2e}")
    print(f" dV: PT err={pt_dv_err:.2e}, CuTE err={cute_dv_err:.2e}, atol={dv_atol:.2e}")
    assert cute_dq_err <= rtol * pt_dq_err + dq_atol, f"dQ error too large: {cute_dq_err:.2e}"
    assert cute_dk_err <= rtol * pt_dk_err + dk_atol, f"dK error too large: {cute_dk_err:.2e}"
    assert cute_dv_err <= rtol * pt_dv_err + dv_atol, f"dV error too large: {cute_dv_err:.2e}"
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main([__file__, "-v"])
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_score_mod.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 846,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:hopper/test_flash_attn_bwd_determinism.py | import os
import math
import itertools
import pytest
import torch
import torch.nn.functional as F
from torch._C import parse_schema
from einops import rearrange, repeat
try:
from flash_attn.layers.rotary import apply_rotary_emb
except ImportError:
apply_rotary_emb = None
from padding import pad_input, unpad_input
from test_util import (
attention_ref,
generate_qkv,
generate_random_padding_mask,
)
from flash_attn_interface import flash_attn_func, flash_attn_varlen_func, flash_attn_combine
from flash_attn_interface import flash_attn_with_kvcache, get_scheduler_metadata
from flash_attn_interface import _flash_attn_backward
# Feature toggles mirroring the FLASH_ATTENTION_DISABLE_* flags used when
# compiling the extension, so the test matrix skips paths that were built out.
DISABLE_BACKWARD = os.getenv("FLASH_ATTENTION_DISABLE_BACKWARD", "FALSE") == "TRUE"
DISABLE_SPLIT = os.getenv("FLASH_ATTENTION_DISABLE_SPLIT", "FALSE") == "TRUE"
DISABLE_PAGEDKV = os.getenv("FLASH_ATTENTION_DISABLE_PAGEDKV", "FALSE") == "TRUE"
DISABLE_APPENDKV = os.getenv("FLASH_ATTENTION_DISABLE_APPENDKV", "FALSE") == "TRUE"
DISABLE_LOCAL = os.getenv("FLASH_ATTENTION_DISABLE_LOCAL", "FALSE") == "TRUE"
DISABLE_SOFTCAP = os.getenv("FLASH_ATTENTION_DISABLE_SOFTCAP", "FALSE") == "TRUE"
DISABLE_PACKGQA = os.getenv("FLASH_ATTENTION_DISABLE_PACKGQA", "FALSE") == "TRUE"
DISABLE_FP16 = os.getenv("FLASH_ATTENTION_DISABLE_FP16", "FALSE") == "TRUE"
# fp8 additionally requires compute capability >= 9 (Hopper).
DISABLE_FP8 = os.getenv("FLASH_ATTENTION_DISABLE_FP8", "FALSE") == "TRUE" or torch.cuda.get_device_capability("cuda")[0] < 9
DISABLE_HDIM64 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM64", "FALSE") == "TRUE"
DISABLE_HDIM96 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM96", "FALSE") == "TRUE"
DISABLE_HDIM128 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM128", "FALSE") == "TRUE"
DISABLE_HDIM192 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM192", "FALSE") == "TRUE"
DISABLE_HDIM256 = os.getenv("FLASH_ATTENTION_DISABLE_HDIM256", "FALSE") == "TRUE"
# deterministic mode not supported for hdim 256
# (intentional unconditional override of the env-derived value above)
DISABLE_HDIM256 = True
# Head dims to test; hdim 256 is always dropped because of the override above.
COMPILED_HDIMS = (
    []
    + ([64] if not DISABLE_HDIM64 else [])
    + ([96] if not DISABLE_HDIM96 else [])
    + ([128] if not DISABLE_HDIM128 else [])
    + ([192] if not DISABLE_HDIM192 else [])
    + ([256] if not DISABLE_HDIM256 else [])
)
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
# @pytest.mark.parametrize("dtype", [torch.bfloat16] + ([torch.float16] if not DISABLE_FP16 else []) + ([torch.float8_e4m3fn] if not DISABLE_FP8 else []))
@pytest.mark.parametrize("dtype", [torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mqa"])
# @pytest.mark.parametrize("has_qv", [False, True])
@pytest.mark.parametrize("has_qv", [False])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [True])
@pytest.mark.parametrize("softcap", [0.0] + ([15.0] if not DISABLE_SOFTCAP else []))
# @pytest.mark.parametrize("softcap", [0.0])
@pytest.mark.parametrize("local", [False] + ([True] if not DISABLE_LOCAL else []))
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
# @pytest.mark.parametrize("V_colmajor", [False, True])
@pytest.mark.parametrize("V_colmajor", [False])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64, 128, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [64, 96, 128, 192])
@pytest.mark.parametrize("d", COMPILED_HDIMS)
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 1),
        (64, 128),
        (128, 192),
        (256, 256),
        (239, 1),
        (799, 3),
        (113, 203),
        (113, 128),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (384, 256),
        (640, 128),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (4096, 4096),
        # (4224, 4224),
        # (8192, 8192),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(128, 128)])
def test_flash_attn_output(
    seqlen_q, seqlen_k, d, causal, local, softcap, V_colmajor, deterministic, has_qv, mha_type, dtype
):
    """Forward + backward accuracy test, plus a backward determinism check.

    Compares flash_attn_func against an fp32-upcast reference (attention_ref)
    and a same-precision PyTorch run; FlashAttention's error must stay within a
    small multiple of the PyTorch implementation's error.  When
    ``deterministic`` is set, the backward pass is re-run many times and must
    reproduce dq/dk/dv bit-for-bit.
    """
    if V_colmajor and (seqlen_k % 16 != 0 or dtype != torch.float8_e4m3fn):
        pytest.skip("V_colmajor requires seqlen_k to be a multiple of 16 and dtype to be float8_e4m3fn")
    if has_qv and (d != 64 or dtype == torch.float8_e4m3fn):
        pytest.skip("Has Qv requires hdim 64 and dtype to be float16 or bfloat16 (not float8_e4m3fn)")
    if deterministic and d == 256:
        pytest.skip("Deterministic mode not supported for hdim 256")
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    # batch_size = 40
    # nheads = 16
    batch_size = 9 if seqlen_k <= 2048 else 2
    # batch_size = 1
    nheads = 6
    # nheads = 1
    nheads_kv = nheads if mha_type == "mha" else (2 if mha_type == "gqa" else 1)
    # fp8 inputs are generated in bf16 and round-tripped through fp8 below.
    dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
    # dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
    # if dtype == torch.float8_e4m3fn:
    #     dv_vals = [d]
    # if has_qv:
    #     dv_vals = [256, 512]
    # attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if not DISABLE_LOCAL else [0]
    dv_vals = [d]
    attention_chunk_vals = [0]
    for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
        # NOTE(review): the loop variable ``dv`` (V head dim, an int) is rebound
        # to the dV gradient tensor in the backward section below; harmless
        # while dv_vals has a single element, but rename it if dv_vals grows.
        print(f"{dv = }, {attention_chunk = }")
        q_ref = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref)
        if softcap > 0.0:
            # Ensure the values of qk are at least within softcap range.
            q_ref = (q_ref * softcap / 4)
        # .to(dtype).to(dtype_ref) quantizes through the test dtype so the
        # reference sees exactly the values the kernel sees.
        q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
        k_ref = torch.randn(batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
        v_ref = torch.randn(batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
        if has_qv:
            qv_ref = torch.randn(batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
        else:
            qv_ref = None
        # Put window_size after QKV randn so that window_size changes from test to test
        window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,)).tolist()
        # window_size = (-1, -1) if not local else (16, 0)
        if dtype == torch.float8_e4m3fn:
            q_descale, k_descale, v_descale = [torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32) * 2 for _ in range(3)]
        else:
            q_descale, k_descale, v_descale = None, None, None
        q, k, v = [x.detach().to(dtype).requires_grad_() for x in (q_ref, k_ref, v_ref)]
        qv = qv_ref.detach().to(dtype).requires_grad_() if has_qv else None
        if V_colmajor:
            v = rearrange(rearrange(v.detach(), "b s h d -> b h d s").contiguous(), "b h d s -> b s h d").requires_grad_()
        # Upcast (fp32) reference output.
        out_ref, attn_ref = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            None,
            None,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            softcap=softcap
        )
        # Same-precision PyTorch run, used to scale the error bound.
        out_pt, attn_pt = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            None,
            None,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
            intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
        )
        # qk = torch.einsum('bshd,bthd->bhst', q_ref, k_ref).float()
        # if qv is not None:
        #     qk += torch.einsum('bshd,bthd->bhst', qv_ref, v_ref).float()
        # m = qk.amax(-1, keepdim=True)
        # s_tmp = torch.exp((qk - m) / math.sqrt(d))
        # exp_sum = s_tmp.sum(-1)
        # qk = torch.einsum('bthd,bshd->bhts', q_ref.float() / math.sqrt(d), k_ref.float())
        # lse_ref = torch.logsumexp(qk, dim=-1)
        # Numerical error if we just do any arithmetic on out_ref
        fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
        rtol = 2 if softcap == 0.0 else 3
        print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
        print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
        # pack_gqa_vals = [False, True] if not DISABLE_PACKGQA else [False]
        # num_splits_vals = [1, 3] if not DISABLE_SPLIT else [1]
        pack_gqa_vals = [False]
        num_splits_vals = [1]
        for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
            print(f"{pack_gqa = }, {num_splits = }")
            out, softmax_lse = flash_attn_func(
                q,
                k,
                v,
                causal=causal,
                qv=qv,
                q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
                window_size=window_size,
                attention_chunk=attention_chunk,
                softcap=softcap,
                pack_gqa=pack_gqa,
                num_splits=num_splits,
                return_attn_probs=True,
            )
            print(f"Output max diff: {(out - out_ref).abs().max().item()}")
            print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
            # if not causal:
            #     print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
            # breakpoint()
            # Check that FlashAttention's numerical error is at most twice the numerical error
            # of a Pytorch implementation.
            assert (out - out_ref).abs().max().item() <= rtol * (out_pt - out_ref).abs().max().item() + fwd_atol
        if (
            not DISABLE_BACKWARD
            and dtype != torch.float8_e4m3fn
            and not V_colmajor
            and not has_qv
            and not dv > 256
            and not attention_chunk != 0
        ):
            g = torch.randn_like(out)
            do_o = ((g.float() * out.float()).sum(-1)).transpose(1, 2)
            # dq/dk/dv are passed as output buffers and also returned.
            dq = torch.empty_like(q)
            dk = torch.empty_like(k)
            dv = torch.empty_like(v)
            dq, dk, dv, softmax_d = _flash_attn_backward(
                g,
                q,
                k,
                v,
                out,
                softmax_lse,
                None, None, # cu_seqlens_q, cu_seqlens_k,
                None, None, # sequed_q, sequed_k,
                None, None, # max_seqlen_q, max_seqlen_k,
                dq,
                dk,
                dv,
                d ** (-0.5),
                causal,
                window_size=window_size,
                softcap=softcap,
                deterministic=deterministic,
            )
            # print(f"dO_O max diff: {(softmax_d - do_o).abs().max().item()}")
            # assert (softmax_d - do_o).abs().max().item() <= 1e-5
            # assert dq_accum.abs().max().item() == 0.0
            # dS = torch.einsum('bthd,bshd->bhts', g.float(), v.float())
            # P = torch.softmax(qk, -1)
            # dP = P * (dS - do_o.transpose(1, 2).unsqueeze(1))
            # dQ = torch.einsum('bhts,bshd->bthd', dP, k.float())
            # dV = torch.einsum('bhts,bthd->bshd', P, g.float())
            # dK = torch.einsum('bhts,bthd->bshd', dP, q.float())
            # dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
            dq_ref, dk_ref, dv_ref = torch.autograd.grad(out_ref, (q_ref, k_ref, v_ref), g)
            dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
            print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
            print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
            print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
            print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
            print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
            print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
            print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
            print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
            print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
            print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
            print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
            print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
            # breakpoint()
            # Dynamic tolerances scaled to each reference gradient's round-off.
            dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
            assert (dq - dq_ref).abs().max().item() <= rtol * (dq_pt - dq_ref).abs().max().item() + dq_atol
            dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
            assert (dk - dk_ref).abs().max().item() <= rtol * (dk_pt - dk_ref).abs().max().item() + dk_atol
            dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
            assert (dv - dv_ref).abs().max().item() <= rtol * (dv_pt - dv_ref).abs().max().item() + dv_atol
            if deterministic:
                # Re-run the backward many times: deterministic mode must
                # reproduce dq/dk/dv bit-for-bit on every iteration.
                iterations = 1000
                for i in range(iterations):
                    dq2 = torch.empty_like(dq)
                    dk2 = torch.empty_like(dk)
                    dv2 = torch.empty_like(dv)
                    dq2, dk2, dv2, softmax_d = _flash_attn_backward(
                        g,
                        q,
                        k,
                        v,
                        out,
                        softmax_lse,
                        None, None, # cu_seqlens_q, cu_seqlens_k,
                        None, None, # sequed_q, sequed_k,
                        None, None, # max_seqlen_q, max_seqlen_k,
                        dq2,
                        dk2,
                        dv2,
                        d ** (-0.5),
                        causal,
                        window_size=window_size,
                        softcap=softcap,
                        deterministic=deterministic,
                    )
                    print(f'dq max diff with myself: {(dq2 - dq).abs().max().item()}')
                    print(f'dk max diff with myself: {(dk2 - dk).abs().max().item()}')
                    print(f'dv max diff with myself: {(dv2 - dv).abs().max().item()}')
                    assert torch.equal(dq, dq2), f"dq not deterministic"
                    assert torch.equal(dk, dk2), f"dk not deterministic"
                    assert torch.equal(dv, dv2), f"dv not deterministic"
                    print(f"✅ Iteration {i} passed!")
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
# @pytest.mark.parametrize("dtype", [torch.bfloat16] + ([torch.float16] if not DISABLE_FP16 else []) + ([torch.float8_e4m3fn] if not DISABLE_FP8 else []))
@pytest.mark.parametrize("dtype", [torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
# @pytest.mark.parametrize("has_qv", [False, True])
@pytest.mark.parametrize("has_qv", [False])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [True])
@pytest.mark.parametrize("softcap", [0.0] + ([15.0] if not DISABLE_SOFTCAP else []))
# @pytest.mark.parametrize("softcap", [0.0])
@pytest.mark.parametrize("local", [False] + ([True] if not DISABLE_LOCAL else []))
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
@pytest.mark.parametrize("add_unused_qkv", [False, True])
# @pytest.mark.parametrize("add_unused_qkv", [True])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [64, 96, 128])
@pytest.mark.parametrize("d", COMPILED_HDIMS)
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 1),
        (1, 3),
        (2, 1),
        (511, 1),
        (3, 513),
        (64, 128),
        (128, 128),
        (256, 256),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (307, 256),
        (640, 128),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (1024, 1024),
        (2048, 2048),
        (4096, 4096),
    ],
)
def test_flash_attn_varlen_output(
    seqlen_q, seqlen_k, d, add_unused_qkv, causal, local, softcap, deterministic, has_qv, mha_type, dtype,
):
    """
    End-to-end varlen FlashAttention check on CUDA.

    Compares the kernel's forward output (and, where backward is supported, its
    dQ/dK/dV) against an fp32 reference (`attention_ref`), using a second
    low-precision PyTorch run (`out_pt`) to calibrate the tolerated error
    (kernel error must be <= rtol * pytorch error + atol). When
    deterministic=True, the backward pass is additionally re-run many times and
    must be bit-exact across runs.
    """
    if has_qv and (d != 64 or dtype == torch.float8_e4m3fn):
        pytest.skip("Has Qv requires hdim 64 and dtype to be float16 or bfloat16 (not float8_e4m3fn)")
    if deterministic and d == 256:
        pytest.skip("Deterministic mode not supported for hdim 256")
    device = "cuda"
    # set seed
    torch.random.manual_seed(seqlen_q + seqlen_k + d + int(causal) * 2 + int(local))
    # batch_size = 40
    # nheads = 16
    batch_size = 9 if seqlen_q <= 2048 else 2
    # batch_size = 32
    nheads = 6
    nheads_kv = nheads if mha_type == "mha" else (2 if mha_type == "gqa" else 1)
    # batch_size = 2
    # nheads = 1
    # nheads_kv = nheads
    dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
    # dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
    # if dtype == torch.float8_e4m3fn:
    #     dv_vals = [d]
    # if has_qv:
    #     dv_vals = [256, 512]
    # attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if seqlen_q <= seqlen_k and not DISABLE_LOCAL else [0]
    dv_vals = [d]
    attention_chunk_vals = [0]
    for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
        print(f"{dv = }, {attention_chunk = }")
        # Round-trip q/k/v through the target dtype so the reference sees the
        # same quantized values the kernel does.
        q_ref = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref)
        if softcap > 0.0:
            # Ensure the values of qk are at least within softcap range.
            q_ref = (q_ref * softcap / 4).detach().requires_grad_()
        q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
        k_ref = torch.randn(batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
        v_ref = torch.randn(batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref).requires_grad_()
        if has_qv:
            qv_ref = torch.randn(batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref).to(dtype).to(dtype_ref)
        else:
            qv_ref = None
        # Put window_size after QKV randn so that window_size changes from test to test
        window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
        if dtype == torch.float8_e4m3fn:
            q_descale, k_descale, v_descale = [torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32) * 2 for _ in range(3)]
        else:
            q_descale, k_descale, v_descale = None, None, None
        q, k, v = [x.detach().requires_grad_() for x in (q_ref, k_ref, v_ref)]
        qv = qv_ref.detach() if has_qv else None
        query_padding_mask = generate_random_padding_mask(
            seqlen_q, batch_size, device, mode="random", zero_lengths=False
        )
        key_padding_mask = generate_random_padding_mask(
            seqlen_k, batch_size, device, mode="random", zero_lengths=True
        )
        # Optionally mark some positions "unused": present in the flat layout but
        # excluded from attention (exercises seqused_q/seqused_k paths).
        def _gen_unused_masks(padding_mask, add_unused, max_seq_len, bs, device):
            if add_unused:
                another_mask = generate_random_padding_mask(max_seq_len, bs, device)
                attn_mask = torch.logical_and(padding_mask, another_mask)
                unused_mask = torch.logical_xor(
                    torch.logical_or(padding_mask, another_mask), attn_mask
                )
            else:
                attn_mask = padding_mask
                unused_mask = None
            return attn_mask, unused_mask
        query_padding_mask, query_unused_mask = _gen_unused_masks(
            query_padding_mask, add_unused_qkv, seqlen_q, batch_size, q.device
        )
        key_padding_mask, key_unused_mask = _gen_unused_masks(
            key_padding_mask, add_unused_qkv, seqlen_k, batch_size, k.device
        )
        (
            q_unpad,
            k_unpad,
            v_unpad,
            qv_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            max_seqlen_q,
            max_seqlen_k,
            q,
            k,
            v,
            qv,
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        ) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, qv=qv, kvpacked=False,
                         query_unused_mask=query_unused_mask, key_unused_mask=key_unused_mask)
        q_unpad, k_unpad, v_unpad = [x.detach().to(dtype).requires_grad_() for x in (q_unpad, k_unpad, v_unpad)]
        out_ref, attn_ref = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            query_padding_mask,
            key_padding_mask,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            softcap=softcap
        )
        # Same reference but without fp32 upcast and with reordered ops: this
        # estimates the numerical error of a plain low-precision implementation.
        out_pt, attn_pt = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            query_padding_mask,
            key_padding_mask,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale, k_descale=k_descale, v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
            intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
        )
        print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
        print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
        if query_unused_mask is not None:
            q_zero_masking = rearrange(query_unused_mask, "b s -> b s 1 1")
        # Numerical error if we just do any arithmetic on out_ref
        fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
        rtol = 2 if softcap == 0.0 else 3
        # pack_gqa_vals = [False, True] if not DISABLE_PACKGQA else [False]
        # num_splits_vals = [1, 3, 0] if not DISABLE_SPLIT else [1]
        pack_gqa_vals = [False]
        num_splits_vals = [1]
        print("cu_seqlens_q: ", cu_seqlens_q)
        print("cu_seqlens_k: ", cu_seqlens_k)
        print("seqused_q: ", seqused_q)
        print("seqused_k: ", seqused_k)
        for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
            print(f"{pack_gqa = }, {num_splits = }")
            out_unpad, softmax_lse = flash_attn_varlen_func(
                q_unpad,
                k_unpad,
                v_unpad,
                cu_seqlens_q,
                cu_seqlens_k,
                max_seqlen_q,
                max_seqlen_k,
                seqused_q=seqused_q,
                seqused_k=seqused_k,
                causal=causal,
                qv=qv_unpad,
                q_descale=q_descale,
                k_descale=k_descale, v_descale=v_descale,
                window_size=window_size,
                attention_chunk=attention_chunk,
                softcap=softcap,
                pack_gqa=pack_gqa,
                num_splits=num_splits,
                deterministic=deterministic,
                return_attn_probs=True,
            )
            out = output_pad_fn(out_unpad)
            if query_unused_mask is not None:
                out.masked_fill_(q_zero_masking, 0.0)
            print(f"Output max diff: {(out - out_ref).abs().max().item()}")
            print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
            # if not causal:
            #     print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
            # breakpoint()
            # Check that FlashAttention's numerical error is at most 3x the numerical error
            # of a Pytorch implementation.
            assert (out - out_ref).abs().max().item() <= rtol * (out_pt - out_ref).abs().max().item() + fwd_atol
            # Backward check: only for configs with backward support (no fp8,
            # no qv, dv <= 256, no chunked attention).
            if (
                not DISABLE_BACKWARD
                and dtype != torch.float8_e4m3fn
                and not has_qv
                and not dv > 256
                and not attention_chunk != 0
            ):
                g_unpad = torch.randn_like(out_unpad)
                do_o = ((g_unpad.float() * out_unpad.float()).sum(-1)).transpose(-1, -2)
                dq_unpad = torch.empty_like(q_unpad)
                dk_unpad = torch.empty_like(k_unpad)
                dv_unpad = torch.empty_like(v_unpad)
                dq_unpad, dk_unpad, dv_unpad, softmax_d = _flash_attn_backward(
                    g_unpad,
                    q_unpad,
                    k_unpad,
                    v_unpad,
                    out_unpad,
                    softmax_lse,
                    cu_seqlens_q, cu_seqlens_k,
                    seqused_q, seqused_k,
                    max_seqlen_q, max_seqlen_k,
                    dq_unpad,
                    dk_unpad,
                    dv_unpad,
                    d ** (-0.5),
                    causal,
                    window_size=window_size,
                    softcap=softcap,
                    deterministic=deterministic,
                )
                dq = dq_pad_fn(dq_unpad)
                dk = dk_pad_fn(dk_unpad)
                # dv shares k's sequence layout, so it reuses the k padding fn.
                dv = dk_pad_fn(dv_unpad)
                if key_unused_mask is not None:
                    k_zero_masking = rearrange(key_unused_mask, "b s -> b s 1 1")
                    dk.masked_fill_(k_zero_masking, 0.0)
                    dv.masked_fill_(k_zero_masking, 0.0)
                if query_unused_mask is not None:
                    dq.masked_fill_(q_zero_masking, 0.0)
                # print(f"dO_O max diff: {(softmax_d - do_o).abs().max().item()}")
                # assert (softmax_d - do_o).abs().max().item() <= 1e-5
                # assert dq_accum.abs().max().item() == 0.0
                g = output_pad_fn(g_unpad)
                # qk = torch.einsum('bthd,bshd->bhts', q / (d ** 0.5), k).float()
                # qk = torch.masked_fill(qk, rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
                # dS = torch.einsum('bthd,bshd->bhts', g.float(), v.float())
                # P = torch.softmax(qk, -1)
                # dP = P * (dS - (g.float() * out.float()).sum(-1).transpose(1, 2).unsqueeze(-1))
                # dQ = torch.einsum('bhts,bshd->bthd', dP, k.float())
                # dV = torch.einsum('bhts,bthd->bshd', P, g.float())
                # dK = torch.einsum('bhts,bthd->bshd', dP, q.float())
                # dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
                dq_ref, dk_ref, dv_ref = torch.autograd.grad(out_ref, (q_ref, k_ref, v_ref), g)
                dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
                print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
                print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
                print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
                print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
                print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
                print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
                print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
                print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
                print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
                print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
                print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
                print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
                # breakpoint()
                dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
                assert (dq - dq_ref).abs().max().item() <= rtol * (dq_pt - dq_ref).abs().max().item() + dq_atol
                dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
                assert (dk - dk_ref).abs().max().item() <= rtol * (dk_pt - dk_ref).abs().max().item() + dk_atol
                dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (0 if softcap == 0 else 3e-4)
                assert (dv - dv_ref).abs().max().item() <= rtol * (dv_pt - dv_ref).abs().max().item() + dv_atol
                print(dq_unpad.shape)
                print(dk_unpad.shape)
                print(dv_unpad.shape)
                print(dq.shape)
                print(dk.shape)
                print(dv.shape)
                # Determinism check: repeated backward runs must be bit-exact.
                if deterministic:
                    iterations = 1000
                    for i in range(iterations):
                        dq_unpad2 = torch.empty_like(q_unpad)
                        dk_unpad2 = torch.empty_like(k_unpad)
                        dv_unpad2 = torch.empty_like(v_unpad)
                        dq_unpad2, dk_unpad2, dv_unpad2, softmax_d = _flash_attn_backward(
                            g_unpad,
                            q_unpad,
                            k_unpad,
                            v_unpad,
                            out_unpad,
                            softmax_lse,
                            cu_seqlens_q, cu_seqlens_k,
                            seqused_q, seqused_k,
                            max_seqlen_q, max_seqlen_k,
                            dq_unpad2,
                            dk_unpad2,
                            dv_unpad2,
                            d ** (-0.5),
                            causal,
                            window_size=window_size,
                            softcap=softcap,
                            deterministic=deterministic,
                        )
                        dq2 = dq_pad_fn(dq_unpad2)
                        dk2 = dk_pad_fn(dk_unpad2)
                        dv2 = dk_pad_fn(dv_unpad2)
                        if key_unused_mask is not None:
                            k_zero_masking = rearrange(key_unused_mask, "b s -> b s 1 1")
                            dk2.masked_fill_(k_zero_masking, 0.0)
                            dv2.masked_fill_(k_zero_masking, 0.0)
                        if query_unused_mask is not None:
                            dq2.masked_fill_(q_zero_masking, 0.0)
                        print(f'dq max diff with myself: {(dq2 - dq).abs().max().item()}')
                        print(f'dk max diff with myself: {(dk2 - dk).abs().max().item()}')
                        print(f'dv max diff with myself: {(dv2 - dv).abs().max().item()}')
                        assert torch.equal(dq, dq2), f"dq not deterministic"
                        assert torch.equal(dk, dk2), f"dk not deterministic"
                        assert torch.equal(dv, dv2), f"dv not deterministic"
print(f"✅ Iteration {i} passed!") | {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "hopper/test_flash_attn_bwd_determinism.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 662,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Dao-AILab/flash-attention:flash_attn/cute/testing.py | import math
from contextlib import nullcontext
from functools import wraps
from typing import Optional
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch._guards import active_fake_mode
from torch._subclasses.fake_tensor import FakeTensorMode
class IndexFirstAxis(torch.autograd.Function):
    """Differentiable row selection along the first axis: out = input[indices]."""

    @staticmethod
    def forward(ctx, input, indices):
        ctx.save_for_backward(indices)
        assert input.ndim >= 2
        ctx.first_axis_dim = input.shape[0]
        trailing_shape = input.shape[1:]
        flat_width = trailing_shape.numel()
        # Flatten trailing dims, gather the selected rows, then restore the shape.
        flat = input.reshape(input.shape[0], flat_width)
        row_index = indices.unsqueeze(-1).expand(indices.shape[0], flat_width)
        return torch.gather(flat, 0, row_index).reshape(-1, *trailing_shape)

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        assert grad_output.ndim >= 2
        trailing_shape = grad_output.shape[1:]
        flat_grad = grad_output.reshape(grad_output.shape[0], -1)
        # Scatter gradients back to their original rows; unselected rows stay zero.
        full_grad = flat_grad.new_zeros(ctx.first_axis_dim, flat_grad.shape[1])
        row_index = indices.unsqueeze(-1).expand(indices.shape[0], flat_grad.shape[1])
        full_grad.scatter_(0, row_index, flat_grad)
        return full_grad.reshape(ctx.first_axis_dim, *trailing_shape), None
# Functional alias: index_first_axis(x, idx) gathers rows x[idx] along dim 0 with autograd.
index_first_axis = IndexFirstAxis.apply
class IndexPutFirstAxis(torch.autograd.Function):
    """Differentiable scatter of rows into a fresh zero tensor along the first axis."""

    @staticmethod
    def forward(ctx, values, indices, first_axis_dim):
        ctx.save_for_backward(indices)
        assert indices.ndim == 1
        assert values.ndim >= 2
        # Rows not listed in `indices` remain zero.
        result = values.new_zeros(first_axis_dim, *values.shape[1:])
        result[indices] = values
        return result

    @staticmethod
    def backward(ctx, grad_output):
        (indices,) = ctx.saved_tensors
        # Gradient of the index-assignment: pick back the rows that were written.
        return grad_output[indices], None, None
# Functional alias: scatter rows into a zero tensor of the given first-axis size, with autograd.
index_put_first_axis = IndexPutFirstAxis.apply
def unpad_input(hidden_states, attention_mask, unused_mask=None):
    """
    Remove padding tokens from `hidden_states`.

    Arguments:
        hidden_states: (batch, seqlen, ...) tensor.
        attention_mask: (batch, seqlen) bool mask, True for attended tokens.
        unused_mask: optional (batch, seqlen) bool mask of tokens that are kept
            in the flat layout but not attended to.

    Returns a tuple (flat_hidden_states, indices, cu_seqlens,
    max_seqlen_in_batch, used_seqlens_in_batch).
    """
    if unused_mask is None:
        all_masks = attention_mask
    else:
        all_masks = attention_mask + unused_mask
    seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
    used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    if active_fake_mode() is not None:
        # torch.nonzero and .item() are unsupported under FakeTensorMode, so
        # pretend every position is kept (metadata-only path).
        batch_size, seqlen = attention_mask.shape
        indices = torch.arange(batch_size * seqlen, device=hidden_states.device)
        max_seqlen_in_batch = seqlen
    else:
        indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
        max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    flat = index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices)
    return (
        flat,
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
        used_seqlens_in_batch,
    )
def pad_input(hidden_states, indices, batch, seqlen):
    """Inverse of unpad_input: scatter flat rows back to (batch, seqlen, ...), zero-padded."""
    padded_flat = index_put_first_axis(hidden_states, indices, batch * seqlen)
    return rearrange(padded_flat, "(b s) ... -> b s ...", b=batch)
def generate_random_padding_mask(max_seqlen, batch_size, device, mode="random", zero_lengths=False):
    """
    Build a (batch_size, max_seqlen) bool mask where each row is True on a prefix.

    mode:
        "full"   - every row has length max_seqlen.
        "random" - lengths drawn from [max_seqlen - 20, max_seqlen], clamped to
                   at least 1 unless zero_lengths is set.
        "third"  - lengths drawn from [max_seqlen // 3, max_seqlen], same clamp.
    zero_lengths: additionally force every 5th row and the last row to length 0,
    to exercise empty-sequence handling.
    """
    assert mode in ["full", "random", "third"]
    min_len = 0 if zero_lengths else 1
    if mode == "full":
        lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32)
    else:
        low = max_seqlen - 20 if mode == "random" else max_seqlen // 3
        lengths = torch.randint(
            max(min_len, low),
            max_seqlen + 1,
            (batch_size, 1),
            device=device,
        )
    if zero_lengths:
        for row in range(batch_size):
            if row % 5 == 0:
                lengths[row] = 0
        lengths[-1] = 0
    positions = repeat(torch.arange(max_seqlen, device=device), "s -> b s", b=batch_size)
    return positions < lengths
def generate_qkv(
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    qv=None,
    kvpacked=False,
    qkvpacked=False,
    query_unused_mask=None,
    key_unused_mask=None,
):
    """
    Convert padded (batch, seqlen, nheads, d) q/k/v into the flattened "varlen"
    layout expected by the varlen kernels, in one of three packings.

    Arguments:
        q: (batch_size, seqlen_q, nheads, d)
        k: (batch_size, seqlen_k, nheads_k, d)
        v: (batch_size, seqlen_k, nheads_k, d_v)
        query_padding_mask / key_padding_mask: (batch_size, seqlen) bool masks.
        qv: optional extra (batch_size, seqlen_q, nheads, d_v) tensor, unpadded
            alongside q.
        kvpacked / qkvpacked: return k,v (resp. q,k,v) stacked into one tensor.
        query_unused_mask / key_unused_mask: positions kept in the flat layout
            but excluded from attention (incompatible with the packed modes).

    Returns, depending on the packing, the unpadded tensors (detached, with
    requires_grad set), cu_seqlens/seqused/max_seqlen metadata, the padded
    tensors, and closures (output_pad_fn, d*_pad_fn) that re-pad outputs and
    gradients back to (batch, seqlen, ...).
    """
    assert not (kvpacked and qkvpacked)
    batch_size, seqlen_q, nheads, d = q.shape
    d_v = v.shape[-1]
    _, seqlen_k, nheads_k, _ = k.shape
    assert k.shape == (batch_size, seqlen_k, nheads_k, d)
    assert v.shape == (batch_size, seqlen_k, nheads_k, d_v)
    if query_unused_mask is not None or key_unused_mask is not None:
        assert not kvpacked
        assert not qkvpacked
    # Unpad q; with no mask, fall back to a dense flattening with uniform
    # cu_seqlens and seqused_q=None.
    if query_padding_mask is not None:
        q_unpad, indices_q, cu_seqlens_q, max_seqlen_q, seqused_q = unpad_input(
            q, query_padding_mask, query_unused_mask
        )
        output_pad_fn = lambda output_unpad: pad_input(
            output_unpad, indices_q, batch_size, seqlen_q
        )
        qv_unpad = rearrange(qv, "b s ... -> (b s) ...")[indices_q] if qv is not None else None
    else:
        q_unpad = rearrange(q, "b s h d -> (b s) h d")
        cu_seqlens_q = torch.arange(
            0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q_unpad.device
        )
        seqused_q = None
        max_seqlen_q = seqlen_q
        output_pad_fn = lambda output_unpad: rearrange(
            output_unpad, "(b s) h d -> b s h d", b=batch_size
        )
        qv_unpad = rearrange(qv, "b s ... -> (b s) ...") if qv is not None else None
    # Same for k and v (they share the key padding mask).
    if key_padding_mask is not None:
        k_unpad, indices_k, cu_seqlens_k, max_seqlen_k, seqused_k = unpad_input(
            k, key_padding_mask, key_unused_mask
        )
        v_unpad, *_ = unpad_input(v, key_padding_mask, key_unused_mask)
    else:
        k_unpad = rearrange(k, "b s h d -> (b s) h d")
        v_unpad = rearrange(v, "b s h d -> (b s) h d")
        cu_seqlens_k = torch.arange(
            0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=k_unpad.device
        )
        seqused_k = None
        max_seqlen_k = seqlen_k
    if qkvpacked:
        # q/k/v stacked on a new axis; requires identical padding and head counts.
        assert (query_padding_mask == key_padding_mask).all()
        assert nheads == nheads_k
        qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1)
        qkv = torch.stack([q, k, v], dim=2)
        if query_padding_mask is not None:
            dqkv_pad_fn = lambda dqkv_unpad: pad_input(dqkv_unpad, indices_q, batch_size, seqlen_q)
        else:
            dqkv_pad_fn = lambda dqkv_unpad: rearrange(
                dqkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            qkv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            max_seqlen_q,
            qkv.detach().requires_grad_(),
            output_pad_fn,
            dqkv_pad_fn,
        )
    elif kvpacked:
        # k/v stacked together, q separate.
        kv_unpad = torch.stack([k_unpad, v_unpad], dim=1)
        kv = torch.stack([k, v], dim=2)
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dkv_pad_fn = lambda dkv_unpad: pad_input(dkv_unpad, indices_k, batch_size, seqlen_k)
        else:
            dkv_pad_fn = lambda dkv_unpad: rearrange(
                dkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            q_unpad.detach().requires_grad_(),
            kv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            kv.detach().requires_grad_(),
            output_pad_fn,
            dq_pad_fn,
            dkv_pad_fn,
        )
    else:
        # Fully unpacked: q, k, v (and optional qv) each returned separately.
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dk_pad_fn = lambda dk_unpad: pad_input(dk_unpad, indices_k, batch_size, seqlen_k)
        else:
            dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, "(b s) h d -> b s h d", b=batch_size)
        return (
            q_unpad.detach().requires_grad_(),
            k_unpad.detach().requires_grad_(),
            v_unpad.detach().requires_grad_(),
            qv_unpad.detach() if qv is not None else None,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            k.detach().requires_grad_(),
            v.detach().requires_grad_(),
            qv.detach() if qv is not None else None,
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        )
def construct_local_mask(
    seqlen_q,
    seqlen_k,
    window_size=(None, None),
    sink_token_length=0,
    query_padding_mask=None,
    key_padding_mask=None,
    key_leftpad=None,
    device=None,
):
    """
    Build a boolean mask, broadcastable to (batch, 1, seqlen_q, seqlen_k), that
    is True at positions *excluded* by a local (sliding-window) attention
    pattern. window_size is (left, right), with None meaning unbounded on that
    side; window offsets are measured after right-aligning queries against keys.
    Columns below `sink_token_length` on the left side are never masked.
    """
    row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).unsqueeze(-1)
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    if key_leftpad is not None:
        # Shift columns left by the per-batch left padding; padded columns get a
        # huge index so they are always masked.
        key_leftpad = key_leftpad.reshape(-1, 1, 1, 1)
        col_idx = col_idx.view(1, 1, 1, seqlen_k).expand(key_leftpad.shape[0], 1, 1, seqlen_k)
        col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
    if key_padding_mask is None:
        sk = seqlen_k
    else:
        sk = key_padding_mask.sum(-1).reshape(-1, 1, 1, 1)
    if query_padding_mask is None:
        sq = seqlen_q
    else:
        sq = query_padding_mask.sum(-1).reshape(-1, 1, 1, 1)
    if window_size[0] is None:
        # Only a right bound: mask everything beyond it.
        return col_idx > row_idx + sk - sq + window_size[1]
    if key_padding_mask is None:
        sk = torch.full_like(col_idx, seqlen_k)
    if window_size[1] is None:
        beyond_right = col_idx > sk
    else:
        beyond_right = col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk)
    before_left = torch.logical_and(
        col_idx < row_idx + sk - sq - window_size[0], col_idx >= sink_token_length
    )
    return torch.logical_or(beyond_right, before_left)
def construct_chunk_mask(
    seqlen_q,
    seqlen_k,
    attention_chunk,
    query_padding_mask=None,
    key_padding_mask=None,
    key_leftpad=None,
    device=None,
):
    """
    Build a boolean mask, broadcastable to (batch, 1, seqlen_q, seqlen_k), that
    is True outside the `attention_chunk`-sized chunk containing each query
    (queries are right-aligned against keys before chunking).
    """
    row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).unsqueeze(-1)
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    if key_leftpad is not None:
        # Shift columns left by the per-batch left padding; padded columns get a
        # huge index so they always fall outside every chunk.
        key_leftpad = key_leftpad.reshape(-1, 1, 1, 1)
        col_idx = col_idx.view(1, 1, 1, seqlen_k).expand(key_leftpad.shape[0], 1, 1, seqlen_k)
        col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
    sk = seqlen_k if key_padding_mask is None else key_padding_mask.sum(-1).reshape(-1, 1, 1, 1)
    sq = seqlen_q if query_padding_mask is None else query_padding_mask.sum(-1).reshape(-1, 1, 1, 1)
    if key_padding_mask is None:
        sk = torch.full_like(col_idx, seqlen_k)
    # Each query attends only within its own chunk [chunk_start, chunk_start + attention_chunk).
    diag = row_idx + sk - sq
    chunk_start = diag - diag % attention_chunk
    return torch.logical_or(
        col_idx < chunk_start, col_idx >= chunk_start + attention_chunk
    )
def attention_ref(
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    key_leftpad=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    qv=None,
    q_descale=None,
    k_descale=None,
    v_descale=None,
    window_size=(None, None),
    attention_chunk=0,
    sink_token_length=0,
    learnable_sink: Optional[torch.Tensor] = None,
    softcap=0.0,
    upcast=True,
    reorder_ops=False,
    intermediate_dtype=None,
):
    """
    Pure-PyTorch reference attention used to validate the fused kernels.

    Arguments:
        q: (batch, seqlen_q, nheads, d)
        k: (batch, seqlen_k, nheads_k, d); nheads must be a multiple of nheads_k (GQA/MQA).
        v: (batch, seqlen_k, nheads_k, dv)
        query_padding_mask / key_padding_mask: (batch, seqlen) bool, True = valid.
        key_leftpad: optional per-batch left padding of the key sequence.
        attn_bias: optional additive bias broadcastable to (batch, nheads, seqlen_q, seqlen_k).
        dropout_p / dropout_mask: dropout prob and an explicit keep-mask
            (dropout is applied via the mask, then rescaled by 1/(1-p)).
        causal: shorthand for window_size = (window_size[0], 0).
        qv: optional (batch, seqlen_q, nheads, dv) tensor adding a qv @ v term
            to the scores; it also enters the softmax scale (sqrt(d + dv)).
        q_descale/k_descale/v_descale: (batch, nheads_k) fp8 dequant scales.
        window_size / attention_chunk / sink_token_length: local-attention
            parameters (see construct_local_mask / construct_chunk_mask).
        learnable_sink: optional per-head sink logit included in the softmax
            normalizer but attending to nothing.
        softcap: if > 0, scores pass through softcap * tanh(scores / softcap).
        upcast: compute in fp32 and cast the result back at the end.
        reorder_ops: scale k instead of q before the matmul (changes only
            floating-point rounding; used to estimate low-precision error).
        intermediate_dtype: optionally round the attention probs through this
            dtype (mimics fp8 kernels).

    Returns (output, attention):
        output: (batch, seqlen_q, nheads, dv), in q's original dtype.
        attention: (batch, nheads, seqlen_q, seqlen_k) softmax probabilities.
    """
    if causal:
        window_size = (window_size[0], 0)
    dtype_og = q.dtype
    if upcast:
        q, k, v = q.float(), k.float(), v.float()
        qv = qv.float() if qv is not None else None
    # fp8 dequantization: apply per-(batch, kv-head) scales before the matmuls.
    if q_descale is not None:
        q_descale = repeat(q_descale, "b h -> b 1 (h g) 1", g=q.shape[2] // k.shape[2])
        q = (q.float() * q_descale).to(q.dtype)
        qv = (qv.float() * q_descale).to(qv.dtype) if qv is not None else None
    if k_descale is not None:
        k = (k.float() * rearrange(k_descale, "b h -> b 1 h 1")).to(dtype=k.dtype)
    if v_descale is not None:
        v = (v.float() * rearrange(v_descale, "b h -> b 1 h 1")).to(dtype=v.dtype)
    seqlen_q, seqlen_k = q.shape[1], k.shape[1]
    # Expand kv heads to match q heads (GQA/MQA).
    k = repeat(k, "b s h d -> b s (h g) d", g=q.shape[2] // k.shape[2])
    v = repeat(v, "b s h d -> b s (h g) d", g=q.shape[2] // v.shape[2])
    d = q.shape[-1]
    dv = v.shape[-1]
    softmax_scale = 1.0 / math.sqrt(d if qv is None else d + dv)
    if not reorder_ops:
        scores = torch.einsum("bthd,bshd->bhts", q * softmax_scale, k)
    else:
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
    if qv is not None:
        scores = scores + torch.einsum("bthd,bshd->bhts", qv * softmax_scale, v)
    if softcap > 0:
        scores = torch.tanh(scores / softcap) * softcap
    if key_padding_mask is not None:
        scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
    # Combine sliding-window and chunked-attention masks (True = excluded).
    local_mask = None
    if window_size[0] is not None or window_size[1] is not None:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            sink_token_length,
            query_padding_mask,
            key_padding_mask,
            key_leftpad=key_leftpad,
            device=q.device,
        )
    if attention_chunk > 0:
        chunk_mask = construct_chunk_mask(
            seqlen_q,
            seqlen_k,
            attention_chunk,
            query_padding_mask,
            key_padding_mask,
            key_leftpad=key_leftpad,
            device=q.device,
        )
        local_mask = (
            torch.logical_or(local_mask, chunk_mask) if local_mask is not None else chunk_mask
        )
    if local_mask is not None:
        scores.masked_fill_(local_mask, float("-inf"))
    if attn_bias is not None:
        scores = scores + attn_bias
    if learnable_sink is None:
        attention = torch.softmax(scores, dim=-1).to(v.dtype)
    else:
        # Softmax with a per-head "sink" logit: it contributes to the
        # normalizer but to no output, damping the attention weights.
        scores_fp32 = scores.to(torch.float32)
        logits_max = torch.amax(scores_fp32, dim=-1, keepdim=True)
        learnable_sink = rearrange(learnable_sink, "h -> h 1 1")
        logits_or_sinks_max = torch.maximum(learnable_sink, logits_max)
        unnormalized_scores = torch.exp(scores_fp32 - logits_or_sinks_max)
        normalizer = unnormalized_scores.sum(dim=-1, keepdim=True) + torch.exp(
            learnable_sink - logits_or_sinks_max
        )
        attention = (unnormalized_scores / normalizer).to(v.dtype)
    # Zero out rows/cols that were fully masked (softmax over all -inf gives NaN).
    if query_padding_mask is not None:
        attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    if key_padding_mask is not None:
        attention = attention.masked_fill(rearrange(~key_padding_mask, "b s -> b 1 1 s"), 0.0)
    if local_mask is not None:
        attention = attention.masked_fill(torch.all(local_mask, dim=-1, keepdim=True), 0.0)
    dropout_scaling = 1.0 / (1 - dropout_p)
    if dropout_mask is not None:
        attention_drop = attention.masked_fill(~dropout_mask, 0.0)
    else:
        attention_drop = attention
    if intermediate_dtype is not None:
        attention_drop = attention_drop.to(intermediate_dtype).to(attention_drop.dtype)
    output = torch.einsum("bhts,bshd->bthd", attention_drop, v * dropout_scaling)
    if query_padding_mask is not None:
        output.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0)
    return output.to(dtype=dtype_og), attention.to(dtype=dtype_og)
def maybe_fake_tensor_mode(fake: bool = True):
    """
    Decorator factory: when `fake` is True, run the wrapped function inside
    torch's FakeTensorMode (shape/dtype metadata only, no real GPU allocation);
    otherwise run it unchanged. One way to populate/pre-compile the
    cute.compile cache without allocating device memory.
    """

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            ctx = FakeTensorMode() if fake else nullcontext()
            with ctx:
                return fn(*args, **kwargs)

        return wrapper

    return decorator
def is_fake_mode() -> bool:
return active_fake_mode() is not None
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/testing.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 420,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/flash_bwd_sm90.py | import math
from typing import Callable, Optional, Type
from functools import partial
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
import cutlass.utils.hopper_helpers as sm90_utils_basic
from cutlass.cute.nvgpu import cpasync, warpgroup
from cutlass.cute import FastDivmodDivisor
from cutlass import Float32, Int32, Boolean, const_expr
from cutlass.utils import LayoutEnum
from quack import copy_utils
from quack import layout_utils
from quack import sm90_utils
from quack.sm90_utils import gemm_zero_init, gemm_w_idx
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
from flash_attn.cute import utils
from flash_attn.cute.mask import AttentionMask
from flash_attn.cute.seqlen_info import SeqlenInfoQK
from flash_attn.cute.block_info import BlockInfo
from flash_attn.cute import pipeline
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.tile_scheduler import TileSchedulerArguments, SingleTileScheduler
from flash_attn.cute.named_barrier import NamedBarrierBwd
from flash_attn.cute.softmax import apply_score_mod_inner, apply_score_mod_bwd_inner
from flash_attn.cute.block_sparsity import BlockSparseTensors
from flash_attn.cute.block_sparse_utils import (
get_total_q_block_count_bwd,
produce_block_sparse_q_loads_bwd_sm90,
consume_block_sparse_mma_bwd_sm90,
dQaccum_store_block_sparse_bwd_sm90,
)
class FlashAttentionBackwardSm90:
    """FlashAttention backward-pass kernel configuration/generator for SM90
    (Hopper), built on the CUTLASS/CuTe Python DSL."""
    # Target GPU architecture (SM90 / Hopper).
    arch = 90
    def __init__(
        self,
        dtype: Type[cutlass.Numeric],
        head_dim: int,
        head_dim_v: Optional[int] = None,
        qhead_per_kvhead: int = 1,
        is_causal: bool = False,
        tile_m: int = 64,
        tile_n: int = 128,
        Q_stage: int = 2,
        dO_stage: int = 2,
        PdS_stage: int = 2,
        SdP_swapAB: bool = False,
        dKV_swapAB: bool = False,
        dQ_swapAB: bool = False,
        AtomLayoutMSdP: int = 1,
        AtomLayoutNdKV: int = 2,
        AtomLayoutMdQ: int = 1,
        num_threads: int = 384,
        V_in_regs: bool = False,
        score_mod: cutlass.Constexpr | None = None,
        score_mod_bwd: cutlass.Constexpr | None = None,
        mask_mod: cutlass.Constexpr | None = None,
        has_aux_tensors: cutlass.Constexpr = False,
        subtile_factor: cutlass.Constexpr[int] = 1,
    ):
        """Configure a Hopper FlashAttention backward kernel.

        dtype is the fp16/bf16 element type of Q/K/V/dO; head_dim / head_dim_v
        are the QK and V head dimensions (padded internally to a multiple of
        16); qhead_per_kvhead > 1 enables GQA. tile_m / tile_n are the tile
        sizes along seqlen_q / seqlen_k, and *_stage are pipeline depths for
        the corresponding smem buffers. The swapAB flags and AtomLayout*
        parameters select operand ordering and warp-group tiling of the S/dP,
        dK/dV and dQ GEMMs — presumably tuned per head dim (TODO confirm).
        score_mod / score_mod_bwd / mask_mod are compile-time score/mask
        modification callbacks; subtile_factor further subdivides tiles when
        aux tensors are present — TODO confirm exact semantics.
        """
        self.dtype = dtype
        # padding head_dim to a multiple of 16 as k_block_size
        hdim_multiple_of = 16
        self.tile_hdim = int(math.ceil(head_dim / hdim_multiple_of) * hdim_multiple_of)
        head_dim_v = head_dim_v if head_dim_v is not None else head_dim
        self.same_hdim_kv = head_dim == head_dim_v
        self.tile_hdimv = int(math.ceil(head_dim_v / hdim_multiple_of) * hdim_multiple_of)
        # Can save registers (and hence be faster) if we don't have to check hdim predication
        self.check_hdim_oob = head_dim != self.tile_hdim
        self.check_hdim_v_oob = head_dim_v != self.tile_hdimv
        self.qhead_per_kvhead = qhead_per_kvhead
        self.is_causal = is_causal
        self.is_local = False
        self.tile_m = tile_m
        self.tile_n = tile_n
        self.num_threads = num_threads
        self.Q_stage = Q_stage
        self.dO_stage = dO_stage
        self.PdS_stage = PdS_stage
        # dO and PdS pipelines are either unbuffered or in lockstep with Q.
        assert self.dO_stage in [1, self.Q_stage]
        assert self.PdS_stage in [1, self.Q_stage]
        self.SdP_swapAB = SdP_swapAB
        self.dKV_swapAB = dKV_swapAB
        self.dQ_swapAB = dQ_swapAB
        self.AtomLayoutMSdP = AtomLayoutMSdP
        self.AtomLayoutNdKV = AtomLayoutNdKV
        self.AtomLayoutMdQ = AtomLayoutMdQ
        # One warp group (128 threads) is reserved, the rest run the MMAs.
        self.num_mma_warp_groups = (self.num_threads // 128) - 1
        # Whether the dK/dV MMA can take its A operand from registers ("rs").
        self.mma_dkv_is_rs = (
            AtomLayoutMSdP == 1
            and AtomLayoutNdKV == self.num_mma_warp_groups
            and SdP_swapAB
            and not dKV_swapAB
        )
        self.V_in_regs = V_in_regs
        if qhead_per_kvhead > 1:
            assert self.same_hdim_kv, "GQA backward requires head_dim == head_dim_v"
            assert self.num_mma_warp_groups == 2, "GQA backward assumes 2 warp groups"
        # These are tuned for speed
        # Do we keep the LSE and dPsum in each thread, or split them across 8 threads that share
        # them and then shuffle to get the value whenever we need? This can reduce register
        # pressure when SdP_swapAB, where each thread needs to keep statistics for (kBlockM / 4)
        # rows. If !SdP_swapAB, each thread only needs to keep statistics for 2 rows.
        # TODO: impl these for hdim 64
        self.shuffle_LSE = self.SdP_swapAB and self.tile_hdim <= 64
        self.shuffle_dPsum = self.SdP_swapAB and self.tile_hdim <= 64
        self.buffer_align_bytes = 1024
        self.score_mod = score_mod
        self.score_mod_bwd = score_mod_bwd
        self.mask_mod = mask_mod
        self.has_aux_tensors = has_aux_tensors
        self.subtile_factor = subtile_factor
        # Narrower vectorization when aux tensors must be read per element.
        if cutlass.const_expr(has_aux_tensors):
            self.vec_size: cutlass.Constexpr = 1
        else:
            self.vec_size: cutlass.Constexpr = 4
        self.qk_acc_dtype = Float32
@staticmethod
def can_implement(
dtype,
head_dim,
head_dim_v,
tile_m,
tile_n,
Q_stage,
num_threads,
V_in_regs=False,
) -> bool:
if dtype not in [cutlass.Float16, cutlass.BFloat16]:
return False
if head_dim % 8 != 0:
return False
if head_dim_v % 8 != 0:
return False
if tile_n % 16 != 0:
return False
if num_threads % 32 != 0:
return False
if (tile_m * 2) % num_threads != 0:
return False
return True
    def _check_type(
        self,
        mQ_type: Type[cutlass.Numeric],
        mK_type: Type[cutlass.Numeric],
        mV_type: Type[cutlass.Numeric],
        mdO_type: Type[cutlass.Numeric],
        mLSE_type: Type[cutlass.Numeric],
        mdPsum_type: Type[cutlass.Numeric],
        mdQaccum_type: Type[cutlass.Numeric],
        mdK_type: Type[cutlass.Numeric],
        mdV_type: Type[cutlass.Numeric],
    ):
        """Validate operand element types; raises TypeError on any mismatch.

        Q/K/V/dO must share one fp16/bf16 dtype (equal to self.dtype); LSE,
        dPsum and dQaccum must be fp32. For MHA (qhead_per_kvhead == 1) dK/dV
        match Q's dtype; for GQA they are fp32 accumulators instead.
        """
        # Get the data type and check if it is fp16 or bf16
        if const_expr(not (mQ_type == mK_type == mV_type == mdO_type)):
            raise TypeError("All tensors must have the same data type")
        if const_expr(mQ_type not in [cutlass.Float16, cutlass.BFloat16]):
            raise TypeError("Only Float16 or BFloat16 is supported")
        if const_expr(mLSE_type not in [Float32]):
            raise TypeError("LSE tensor must be Float32")
        if const_expr(mdPsum_type not in [Float32]):
            raise TypeError("dPsum tensor must be Float32")
        if const_expr(mdQaccum_type not in [Float32]):
            raise TypeError("dQaccum tensor must be Float32")
        if const_expr(self.qhead_per_kvhead == 1):
            if const_expr(not (mdK_type == mdV_type == mQ_type)):
                raise TypeError("mdK and mdV tensors must have the same data type as mQ")
        else:
            # GQA accumulates dK/dV in fp32 across the grouped query heads.
            if const_expr(not (mdK_type == mdV_type == Float32)):
                raise TypeError("mdKaccum and mdVaccum tensors must have the data type Float32")
        assert mQ_type == self.dtype
    def _setup_attributes(self):
        """Build shared-memory layouts and the dQaccum register->smem tiled copy.

        Populates the swizzled row-major smem layouts for Q, K, V, dO and the
        shared P/dS scratch tile (staged where the corresponding pipeline has
        more than one buffer), plus the flat fp32 sdQaccum layout split across
        the MMA warp groups.
        """
        self.sQ_layout, self.sK_layout, self.sV_layout, self.sdO_layout, self.sPdS_layout = [
            sm90_utils.make_smem_layout(self.dtype, LayoutEnum.ROW_MAJOR, shape, stage)
            for shape, stage in [
                ((self.tile_m, self.tile_hdim), self.Q_stage),
                ((self.tile_n, self.tile_hdim), None),
                ((self.tile_n, self.tile_hdimv), None),
                ((self.tile_m, self.tile_hdimv), self.dO_stage),
                ((self.tile_m, self.tile_n), self.PdS_stage),
            ]
        ]
        # One (tile_m x tile_hdim) fp32 tile, partitioned evenly over the MMA warp groups.
        self.sdQaccum_layout = cute.make_layout(
            (self.tile_m * self.tile_hdim // self.num_mma_warp_groups, self.num_mma_warp_groups)
        )
        # dQaccum R->S
        # 128-bit vectorized universal copy: each thread moves 4 fp32 values per copy.
        self.r2s_tiled_copy_dQaccum = cute.make_tiled_copy_tv(
            cute.make_copy_atom(cute.nvgpu.CopyUniversalOp(), Float32, num_bits_per_copy=128),
            # thr_layout
            cute.make_layout((self.num_threads_per_warp_group, self.num_mma_warp_groups)),
            cute.make_layout(128 // Float32.width), # val_layout
        )
        # dKVaccum for GQA epilogue - reuses sV+sK memory recast as f32
        # TODO: assert that sVaccum and sKaccum don't overflow smem
    def _get_tiled_mma(self):
        """Construct the four warpgroup tiled MMAs used by the backward pass.

        Returns ``(tiled_mma_SdP, tiled_mma_dK, tiled_mma_dV, tiled_mma_dQ)``.
        Each MMA may have its A/B operands swapped (the ``*_swapAB`` flags),
        which transposes the atom layout and tiler accordingly.
        """
        # S = Q @ K.T, dP = dO @ V.T
        atom_layout_SdP = (self.AtomLayoutMSdP, self.num_mma_warp_groups // self.AtomLayoutMSdP)
        tiler_mn_SdP = (self.tile_m // atom_layout_SdP[0], self.tile_n // atom_layout_SdP[1])
        tiled_mma_SdP = sm90_utils_basic.make_trivial_tiled_mma(
            self.dtype,
            self.dtype,
            warpgroup.OperandMajorMode.K,
            warpgroup.OperandMajorMode.K,
            Float32,
            atom_layout_mnk=(atom_layout_SdP if not self.SdP_swapAB else atom_layout_SdP[::-1])
            + (1,),
            tiler_mn=tiler_mn_SdP if not self.SdP_swapAB else tiler_mn_SdP[::-1],
        )
        # dV = P.T @ dO, dK = dS.T @ Q
        atom_layout_dKV = (self.AtomLayoutNdKV, self.num_mma_warp_groups // self.AtomLayoutNdKV)
        tiler_mn_dK = (self.tile_n // atom_layout_dKV[0], self.tile_hdim // atom_layout_dKV[1])
        tiler_mn_dV = (self.tile_n // atom_layout_dKV[0], self.tile_hdimv // atom_layout_dKV[1])
        # When mma_dkv_is_rs, the A operand (P / dS) stays in registers (RMEM source).
        tiled_mma_dK, tiled_mma_dV = [
            sm90_utils_basic.make_trivial_tiled_mma(
                self.dtype,
                self.dtype,
                warpgroup.OperandMajorMode.MN
                if not self.mma_dkv_is_rs
                else warpgroup.OperandMajorMode.K,
                warpgroup.OperandMajorMode.MN,
                Float32,
                atom_layout_mnk=(atom_layout_dKV if not self.dKV_swapAB else atom_layout_dKV[::-1])
                + (1,),
                tiler_mn=tiler_mn_d if not self.dKV_swapAB else tiler_mn_d[::-1],
                a_source=warpgroup.OperandSource.RMEM
                if self.mma_dkv_is_rs
                else warpgroup.OperandSource.SMEM,
            )
            for tiler_mn_d in (tiler_mn_dK, tiler_mn_dV)
        ]
        # dQ = dS @ K
        atom_layout_dQ = (self.AtomLayoutMdQ, self.num_mma_warp_groups // self.AtomLayoutMdQ)
        tiler_mn_dQ = (self.tile_m // atom_layout_dQ[0], self.tile_hdim // atom_layout_dQ[1])
        tiled_mma_dQ = sm90_utils_basic.make_trivial_tiled_mma(
            self.dtype,
            self.dtype,
            warpgroup.OperandMajorMode.K if not self.dQ_swapAB else warpgroup.OperandMajorMode.MN,
            warpgroup.OperandMajorMode.MN if not self.dQ_swapAB else warpgroup.OperandMajorMode.K,
            Float32,
            atom_layout_mnk=(atom_layout_dQ if not self.dQ_swapAB else atom_layout_dQ[::-1]) + (1,),
            tiler_mn=tiler_mn_dQ if not self.dQ_swapAB else tiler_mn_dQ[::-1],
        )
        return tiled_mma_SdP, tiled_mma_dK, tiled_mma_dV, tiled_mma_dQ
    def _get_shared_storage_cls(self):
        """Generate the @cute.struct describing the kernel's shared memory.

        Contains the pipeline mbarriers, the LSE/dPsum row vectors (rounded up
        to 64 rows per stage), the staged Q/K/V/dO tiles, P and dS scratch, and
        the fp32 dQ accumulator tile.  Field order determines smem placement;
        do not reorder.
        """
        sQ_struct, sK_struct, sV_struct, sdO_struct, sdQaccum_struct = [
            cute.struct.Align[cute.struct.MemRange[t, cute.cosize(layout)], self.buffer_align_bytes]
            for (layout, t) in [
                (self.sQ_layout, self.dtype),
                (self.sK_layout, self.dtype),
                (self.sV_layout, self.dtype),
                (self.sdO_layout, self.dtype),
                (self.sdQaccum_layout, Float32),
            ]
        ]
        cosize_sdS = cute.cosize(self.sPdS_layout)
        # sP is only materialized in smem when the dK/dV MMA reads A from smem.
        cosize_sP = cute.cosize(self.sPdS_layout) if const_expr(not self.mma_dkv_is_rs) else 0
        sLSE_struct = cute.struct.Align[
            cute.struct.MemRange[Float32, cute.round_up(self.tile_m, 64) * self.Q_stage], 128
        ]
        sdPsum_struct = cute.struct.Align[
            cute.struct.MemRange[Float32, cute.round_up(self.tile_m, 64) * self.dO_stage], 128
        ]
        @cute.struct
        class SharedStorageQKV:
            mbar_ptr_Q: cute.struct.MemRange[cutlass.Int64, self.Q_stage * 2]
            mbar_ptr_dO: cute.struct.MemRange[cutlass.Int64, self.dO_stage * 2]
            sLSE: sLSE_struct
            sdPsum: sdPsum_struct
            sQ: sQ_struct
            sV: sV_struct
            sK: sK_struct
            sdO: sdO_struct
            sP: cute.struct.Align[cute.struct.MemRange[self.dtype, cosize_sP], 1024]
            sdS: cute.struct.Align[cute.struct.MemRange[self.dtype, cosize_sdS], 1024]
            sdQaccum: sdQaccum_struct
        return SharedStorageQKV
    @cute.jit
    def __call__(
        self,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        mdO: cute.Tensor,
        mLSE: cute.Tensor,
        mdPsum: cute.Tensor,
        mdQaccum: cute.Tensor,
        mdK: cute.Tensor,
        mdV: cute.Tensor,
        softmax_scale: Float32,
        stream: cuda.CUstream,
        mCuSeqlensQ: Optional[cute.Tensor] = None,
        mCuSeqlensK: Optional[cute.Tensor] = None,
        mSeqUsedQ: Optional[cute.Tensor] = None,
        mSeqUsedK: Optional[cute.Tensor] = None,
        softcap: Float32 | float | None = None,
        window_size_left: Int32 | int | None = None,
        window_size_right: Int32 | int | None = None,
        mdQ_semaphore: Optional[cute.Tensor] = None,
        mdK_semaphore: Optional[cute.Tensor] = None,
        mdV_semaphore: Optional[cute.Tensor] = None,
        aux_tensors: Optional[list] = None,
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
    ):
        """Host-side entry: validate types, set up layouts/TMA atoms, launch kernel.

        Transposes the global tensors into the kernel's preferred mode order,
        builds the tiled MMAs, smem layouts, TMA descriptors and tile
        scheduler, then launches one CTA per (n_block, head, batch) work tile.
        NOTE(review): mLSE/mdPsum appear to be the per-row softmax statistics
        from the forward pass and the dO*O row sums — confirm against caller.
        """
        assert mdQ_semaphore is None and mdK_semaphore is None and mdV_semaphore is None, (
            "determinism not supported yet for Sm90"
        )
        self._check_type(
            *(
                t.element_type if t is not None else None
                for t in (mQ, mK, mV, mdO, mLSE, mdPsum, mdQaccum, mdK, mdV)
            )
        )
        mQ, mK, mV, mdO, mLSE, mdPsum, mdQaccum, mdK, mdV = [
            assume_tensor_aligned(t) for t in (mQ, mK, mV, mdO, mLSE, mdPsum, mdQaccum, mdK, mdV)
        ]
        layout_transpose = [1, 3, 2, 0] # (b, s, n, h) --> (s, h, n, b)
        mQ, mK, mV, mdO = [layout_utils.select(t, layout_transpose) for t in (mQ, mK, mV, mdO)]
        if const_expr(self.qhead_per_kvhead == 1):
            mdK, mdV = [layout_utils.select(t, layout_transpose) for t in (mdK, mdV)]
        else:
            # GQA accumulators come in with a flattened (s*h) mode.
            accum_transpose = [2, 1, 0] # (b, n, s*h) -> (s*h, n, b)
            mdK, mdV = [layout_utils.select(t, accum_transpose) for t in (mdK, mdV)]
        LSE_dPsum_dQaccum_transpose = [2, 1, 0] # (b, n, s) -> (s, n, b)
        mLSE, mdPsum, mdQaccum = [
            layout_utils.select(t, LSE_dPsum_dQaccum_transpose) for t in (mLSE, mdPsum, mdQaccum)
        ]
        tiled_mma_SdP, tiled_mma_dK, tiled_mma_dV, tiled_mma_dQ = self._get_tiled_mma()
        self.num_mma_threads = tiled_mma_SdP.size
        # One extra warp group (128 threads) acts as producer (TMA / dQ store).
        assert self.num_mma_threads + 128 == self.num_threads
        self.num_threads_per_warp_group = 128
        self.num_producer_threads = 32
        # Register rebalancing between producer and MMA warp groups.
        self.num_mma_regs = 240
        self.num_producer_regs = 24
        # self.num_mma_regs = 232
        # self.num_producer_regs = 40
        self._setup_attributes()
        SharedStorage = self._get_shared_storage_cls()
        # Per-operand TMA transaction byte counts used for mbarrier tx accounting.
        self.tma_copy_bytes = {
            name: cute.size_in_bytes(mX.element_type, cute.select(layout, mode=[0, 1]))
            for name, mX, layout in [
                ("Q", mQ, self.sQ_layout),
                ("K", mK, self.sK_layout),
                ("V", mV, self.sV_layout),
                ("dO", mdO, self.sdO_layout),
            ]
        }
        self.tma_copy_bytes["LSE"] = self.tile_m * Float32.width // 8
        self.tma_copy_bytes["dPsum"] = self.tile_m * Float32.width // 8
        self.tma_copy_bytes["dQ"] = (
            self.tile_m * self.tile_hdim * Float32.width // 8 // self.num_mma_warp_groups
        )
        self.tma_copy_bytes["dKacc"] = self.tile_n * self.tile_hdim * Float32.width // 8
        self.tma_copy_bytes["dVacc"] = self.tile_n * self.tile_hdimv * Float32.width // 8
        # G2S TMA atoms for the inputs.
        tma_atom_Q, tma_tensor_Q = cpasync.make_tiled_tma_atom(
            cpasync.CopyBulkTensorTileG2SOp(),
            mQ,
            cute.select(self.sQ_layout, mode=[0, 1]),
            (self.tile_m, self.tile_hdim),
        )
        tma_atom_K, tma_tensor_K = cpasync.make_tiled_tma_atom(
            cpasync.CopyBulkTensorTileG2SOp(),
            mK,
            cute.select(self.sK_layout, mode=[0, 1]),
            (self.tile_n, self.tile_hdim),
        )
        tma_atom_V, tma_tensor_V = cpasync.make_tiled_tma_atom(
            cpasync.CopyBulkTensorTileG2SOp(),
            mV,
            cute.select(self.sV_layout, mode=[0, 1]),
            (self.tile_n, self.tile_hdimv),
        )
        tma_atom_dO, tma_tensor_dO = cpasync.make_tiled_tma_atom(
            cpasync.CopyBulkTensorTileG2SOp(),
            mdO,
            cute.select(self.sdO_layout, mode=[0, 1]),
            (self.tile_m, self.tile_hdimv),
        )
        if const_expr(self.qhead_per_kvhead == 1):
            # S2G TMA atoms for the dK/dV epilogue (non-GQA path only).
            tma_atom_dK, tma_tensor_dK = cpasync.make_tiled_tma_atom(
                cpasync.CopyBulkTensorTileS2GOp(),
                mdK,
                cute.select(self.sK_layout, mode=[0, 1]),
                (self.tile_n, self.tile_hdim),
            )
            tma_atom_dV, tma_tensor_dV = cpasync.make_tiled_tma_atom(
                cpasync.CopyBulkTensorTileS2GOp(),
                mdV,
                cute.select(self.sV_layout, mode=[0, 1]),
                (self.tile_n, self.tile_hdimv),
            )
        else:
            tma_atom_dK = tma_atom_dV = tma_tensor_dK = tma_tensor_dV = None
        TileScheduler = SingleTileScheduler
        tile_sched_args = TileSchedulerArguments(
            cute.ceil_div(cute.size(mK.shape[0]), self.tile_n),
            cute.size(mQ.shape[2]),
            cute.size(mQ.shape[3]),
            1, # num_splits
            cute.size(mK.shape[0]),
            mQ.shape[1],
            mV.shape[1],
            total_q=cute.size(mQ.shape[0]) * cute.size(mQ.shape[3]),
            tile_shape_mn=(self.tile_m, self.tile_n),
            mCuSeqlensQ=None,
            mSeqUsedQ=None,
            qhead_per_kvhead_packgqa=1,
            element_size=self.dtype.width // 8,
            is_persistent=False,
            lpt=False,
        )
        tile_sched_params = TileScheduler.to_underlying_arguments(tile_sched_args)
        grid_dim = TileScheduler.get_grid_shape(tile_sched_params)
        # Fold ln->log2 conversion into the scale so softmax can use exp2.
        LOG2_E = math.log2(math.e)
        if const_expr(self.score_mod is None):
            softmax_scale_log2 = softmax_scale * LOG2_E
        else:
            # With a score_mod, the raw scale is applied inside the mod instead.
            softmax_scale_log2 = LOG2_E
        fastdiv_mods = None
        if const_expr(aux_tensors is not None):
            seqlen_q = cute.size(mQ.shape[0])
            seqlen_k = cute.size(mK.shape[0])
            seqlen_q_divmod = FastDivmodDivisor(seqlen_q)
            seqlen_k_divmod = FastDivmodDivisor(seqlen_k)
            fastdiv_mods = (seqlen_q_divmod, seqlen_k_divmod)
        qhead_per_kvhead_divmod = None
        if const_expr(self.qhead_per_kvhead > 1):
            qhead_per_kvhead_divmod = FastDivmodDivisor(self.qhead_per_kvhead)
        self.use_block_sparsity = cutlass.const_expr(blocksparse_tensors is not None)
        self.kernel(
            tma_tensor_Q,
            tma_tensor_K,
            tma_tensor_V,
            tma_tensor_dO,
            tma_tensor_dK if const_expr(self.qhead_per_kvhead == 1) else mdK,
            tma_tensor_dV if const_expr(self.qhead_per_kvhead == 1) else mdV,
            tma_atom_Q,
            tma_atom_K,
            tma_atom_V,
            tma_atom_dO,
            tma_atom_dK,
            tma_atom_dV,
            mLSE,
            mdPsum,
            mdQaccum,
            self.sQ_layout,
            self.sK_layout,
            self.sV_layout,
            self.sPdS_layout,
            self.sdO_layout,
            self.sdQaccum_layout,
            self.r2s_tiled_copy_dQaccum,
            tiled_mma_SdP,
            tiled_mma_dK,
            tiled_mma_dV,
            tiled_mma_dQ,
            softmax_scale_log2,
            softmax_scale,
            tile_sched_params,
            TileScheduler,
            SharedStorage,
            aux_tensors,
            fastdiv_mods,
            blocksparse_tensors,
            qhead_per_kvhead_divmod,
        ).launch(
            grid=grid_dim,
            block=[self.num_threads, 1, 1],
            stream=stream,
            min_blocks_per_mp=1,
        )
    @cute.kernel
    def kernel(
        self,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        mdO: cute.Tensor,
        mdK: cute.Tensor,
        mdV: cute.Tensor,
        tma_atom_Q: cute.CopyAtom,
        tma_atom_K: cute.CopyAtom,
        tma_atom_V: cute.CopyAtom,
        tma_atom_dO: cute.CopyAtom,
        tma_atom_dK: cute.CopyAtom,
        tma_atom_dV: cute.CopyAtom,
        mLSE: cute.Tensor,
        mdPsum: cute.Tensor,
        mdQaccum: cute.Tensor,
        sQ_layout: cute.ComposedLayout,
        sK_layout: cute.ComposedLayout,
        sV_layout: cute.ComposedLayout,
        sPdS_layout: cute.ComposedLayout,
        sdO_layout: cute.ComposedLayout,
        sdQaccum_layout: cute.Layout,
        r2s_tiled_copy_dQaccum: cute.TiledCopy,
        tiled_mma_SdP: cute.TiledMma,
        tiled_mma_dK: cute.TiledMma,
        tiled_mma_dV: cute.TiledMma,
        tiled_mma_dQ: cute.TiledMma,
        softmax_scale_log2,
        softmax_scale,
        tile_sched_params: ParamsBase,
        TileScheduler: cutlass.Constexpr[Callable],
        SharedStorage: cutlass.Constexpr[Callable],
        aux_tensors: Optional[list] = None,
        fastdiv_mods=(None, None),
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
        qhead_per_kvhead_divmod: Optional[FastDivmodDivisor] = None,
    ):
        """Device entry point with warp specialization.

        The first warp group (warps 0-3) is the producer: warp 0 runs the TMA
        load loop and warp 1 drains the dQ accumulator from smem to global
        memory.  The remaining warp groups run the MMA consumer pipeline.
        """
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        # prefetch TMA descriptors
        if warp_idx == 0:
            cpasync.prefetch_descriptor(tma_atom_Q)
            cpasync.prefetch_descriptor(tma_atom_K)
            cpasync.prefetch_descriptor(tma_atom_V)
            cpasync.prefetch_descriptor(tma_atom_dO)
        smem = cutlass.utils.SmemAllocator()
        storage = smem.allocate(SharedStorage)
        # Producer is a single thread; consumers are all MMA warps.
        pipeline_producer_group = cutlass.pipeline.CooperativeGroup(cutlass.pipeline.Agent.Thread)
        pipeline_consumer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, self.num_mma_threads // cute.arch.WARP_SIZE
        )
        # Q pipeline also carries LSE bytes; dO pipeline also carries dPsum bytes.
        pipeline_Q = pipeline.PipelineTmaAsync.create(
            barrier_storage=storage.mbar_ptr_Q.data_ptr(),
            num_stages=self.Q_stage,
            producer_group=pipeline_producer_group,
            consumer_group=pipeline_consumer_group,
            tx_count=self.tma_copy_bytes["Q"] + self.tma_copy_bytes["LSE"],
            defer_sync=True,
        )
        pipeline_dO = pipeline.PipelineTmaAsync.create(
            barrier_storage=storage.mbar_ptr_dO.data_ptr(),
            num_stages=self.dO_stage,
            producer_group=pipeline_producer_group,
            consumer_group=pipeline_consumer_group,
            tx_count=self.tma_copy_bytes["dO"] + self.tma_copy_bytes["dPsum"],
            defer_sync=False,
        )
        # Materialize smem tensors (swizzled views) from the shared storage.
        sQ = storage.sQ.get_tensor(sQ_layout.outer, swizzle=sQ_layout.inner)
        sdO = storage.sdO.get_tensor(sdO_layout.outer, swizzle=sdO_layout.inner)
        sK = storage.sK.get_tensor(sK_layout.outer, swizzle=sK_layout.inner)
        sV = storage.sV.get_tensor(sV_layout.outer, swizzle=sV_layout.inner)
        sP = None
        if const_expr(not self.mma_dkv_is_rs):
            sP = storage.sP.get_tensor(sPdS_layout.outer, swizzle=sPdS_layout.inner)
        sdS = storage.sdS.get_tensor(sPdS_layout.outer, swizzle=sPdS_layout.inner)
        # Row vectors padded to 64 rows per stage (see _get_shared_storage_cls).
        sLSE = storage.sLSE.get_tensor(
            cute.make_layout(
                (self.tile_m, self.Q_stage),
                stride=(1, cute.round_up(self.tile_m, 64)),
            )
        )
        sdPsum = storage.sdPsum.get_tensor(
            cute.make_layout(
                (self.tile_m, self.dO_stage),
                stride=(1, cute.round_up(self.tile_m, 64)),
            )
        )
        sdQaccum = storage.sdQaccum.get_tensor(sdQaccum_layout)
        block_info = BlockInfo(
            self.tile_m,
            self.tile_n,
            self.is_causal,
            self.is_local,
            False, # is_split_kv
            None,
            None,
            qhead_per_kvhead_packgqa=1,
        )
        SeqlenInfoCls = partial(
            SeqlenInfoQK.create,
            seqlen_q_static=mQ.shape[0],
            seqlen_k_static=mK.shape[0],
            mCuSeqlensQ=None,
            mCuSeqlensK=None,
            mSeqUsedQ=None,
            mSeqUsedK=None,
        )
        AttentionMaskCls = partial(
            AttentionMask,
            self.tile_m,
            self.tile_n,
            window_size_left=None,
            window_size_right=None,
            swap_AB=self.SdP_swapAB,
        )
        TileSchedulerCls = partial(TileScheduler.create, tile_sched_params)
        if warp_idx < 4:
            # Producer warp group: give back registers to the MMA warps.
            cute.arch.setmaxregister_decrease(self.num_producer_regs)
            if warp_idx == 0:
                self.load(
                    mQ,
                    mK,
                    mV,
                    mdO,
                    mLSE,
                    mdPsum,
                    sQ,
                    sK,
                    sV,
                    sdO,
                    sLSE,
                    sdPsum,
                    tma_atom_Q,
                    tma_atom_K,
                    tma_atom_V,
                    tma_atom_dO,
                    pipeline_Q,
                    pipeline_dO,
                    block_info,
                    SeqlenInfoCls,
                    TileSchedulerCls,
                    blocksparse_tensors,
                    qhead_per_kvhead_divmod,
                )
            if warp_idx == 1:
                self.dQaccum_store(
                    mdQaccum,
                    sdQaccum,
                    block_info,
                    TileSchedulerCls,
                    SeqlenInfoCls,
                    blocksparse_tensors,
                )
        else:
            # Consumer (MMA) warp groups.
            cute.arch.setmaxregister_increase(self.num_mma_regs)
            tidx, _, _ = cute.arch.thread_idx()
            # Shift thread index past the producer warp group.
            tidx = tidx - 128
            self.mma(
                tiled_mma_SdP,
                tiled_mma_dK,
                tiled_mma_dV,
                tiled_mma_dQ,
                mdK,
                mdV,
                mdQaccum,
                sQ,
                sK,
                sV,
                sdO,
                sP,
                sdS,
                sLSE,
                sdPsum,
                sdQaccum,
                pipeline_Q,
                pipeline_dO,
                tidx,
                tma_atom_dK,
                tma_atom_dV,
                r2s_tiled_copy_dQaccum,
                softmax_scale_log2,
                softmax_scale,
                block_info,
                SeqlenInfoCls,
                AttentionMaskCls,
                TileSchedulerCls,
                aux_tensors,
                fastdiv_mods,
                blocksparse_tensors,
                qhead_per_kvhead_divmod,
            )
    @cute.jit
    def load(
        self,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        mdO: cute.Tensor,
        mLSE: cute.Tensor,
        mdPsum: cute.Tensor,
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sV: cute.Tensor,
        sdO: cute.Tensor,
        sLSE: cute.Tensor,
        sdPsum: cute.Tensor,
        tma_atom_Q: cute.CopyAtom,
        tma_atom_K: cute.CopyAtom,
        tma_atom_V: cute.CopyAtom,
        tma_atom_dO: cute.CopyAtom,
        pipeline_Q: cutlass.pipeline.PipelineAsync,
        pipeline_dO: cutlass.pipeline.PipelineAsync,
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
        qhead_per_kvhead_divmod: Optional[FastDivmodDivisor] = None,
    ):
        """TMA producer loop (elected warp, lane-0 effective).

        For each work tile, loads K/V once (piggybacked on the first Q/dO
        stage's transaction count via extra_tx_count) and streams Q+LSE and
        dO+dPsum per m_block through the two pipelines.
        """
        warp_idx_in_wg = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
        if warp_idx_in_wg == 0:
            producer_state_Q = cutlass.pipeline.make_pipeline_state(
                cutlass.pipeline.PipelineUserType.Producer, self.Q_stage
            )
            producer_state_dO = cutlass.pipeline.make_pipeline_state(
                cutlass.pipeline.PipelineUserType.Producer, self.dO_stage
            )
            tile_scheduler = TileSchedulerCls()
            work_tile = tile_scheduler.initial_work_tile_info()
            while work_tile.is_valid_tile:
                n_block, head_idx, batch_idx, _ = work_tile.tile_idx
                seqlen = SeqlenInfoCls(batch_idx)
                # GQA: several Q heads map to one KV head.
                head_idx_kv = (
                    head_idx
                    if const_expr(self.qhead_per_kvhead == 1)
                    else head_idx // qhead_per_kvhead_divmod
                )
                # Global-memory tiles for this (head, batch); Q/dO/LSE/dPsum keep
                # the m_block mode open (None) for per-iteration indexing.
                mK_cur = mK[None, None, head_idx_kv, batch_idx]
                gK = cute.local_tile(mK_cur, (self.tile_n, self.tile_hdim), (n_block, 0))
                mV_cur = mV[None, None, head_idx_kv, batch_idx]
                gV = cute.local_tile(mV_cur, (self.tile_n, self.tile_hdimv), (n_block, 0))
                mQ_cur = mQ[None, None, head_idx, batch_idx]
                gQ = cute.local_tile(mQ_cur, (self.tile_m, self.tile_hdim), (None, 0))
                mdO_cur = mdO[None, None, head_idx, batch_idx]
                gdO = cute.local_tile(mdO_cur, (self.tile_m, self.tile_hdimv), (None, 0))
                mLSE_cur = mLSE[None, head_idx, batch_idx]
                gLSE = cute.local_tile(mLSE_cur, (self.tile_m,), (None,))
                mdPsum_cur = mdPsum[None, head_idx, batch_idx]
                gdPsum = cute.local_tile(mdPsum_cur, (self.tile_m,), (None,))
                load_K, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_K, 0, cute.make_layout(1), gK, sK, single_stage=True
                )
                load_V, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_V, 0, cute.make_layout(1), gV, sV, single_stage=True
                )
                load_Q, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_Q, 0, cute.make_layout(1), gQ, sQ
                )
                load_Q = copy_utils.tma_producer_copy_fn(load_Q, pipeline_Q)
                load_dO, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_dO, 0, cute.make_layout(1), gdO, sdO
                )
                load_dO = copy_utils.tma_producer_copy_fn(load_dO, pipeline_dO)
                # LSE rides the Q pipeline; dPsum rides the dO pipeline.
                load_LSE = copy_utils.cpasync_bulk_get_copy_fn(gLSE, sLSE)
                load_LSE = copy_utils.tma_producer_copy_fn(load_LSE, pipeline_Q)
                load_dPsum = copy_utils.cpasync_bulk_get_copy_fn(gdPsum, sdPsum)
                load_dPsum = copy_utils.tma_producer_copy_fn(load_dPsum, pipeline_dO)
                m_block_min, m_block_max = block_info.get_m_block_min_max(seqlen, n_block)
                if const_expr(not self.use_block_sparsity):
                    total_m_block_cnt = m_block_max - m_block_min
                    # Local attention can yield an empty m_block range.
                    process_tile = const_expr(not self.is_local) or m_block_min < m_block_max
                else:
                    total_m_block_cnt = get_total_q_block_count_bwd(
                        blocksparse_tensors,
                        batch_idx,
                        head_idx,
                        n_block,
                        subtile_factor=self.subtile_factor,
                        m_block_max=m_block_max,
                    )
                    process_tile = total_m_block_cnt > Int32(0)
                if process_tile:
                    if const_expr(not self.use_block_sparsity):
                        first_m_block = m_block_min
                        # First iteration: fold the one-shot K (resp. V) bytes
                        # into the Q (resp. dO) stage's barrier transaction.
                        pipeline_Q.producer_acquire(
                            producer_state_Q, extra_tx_count=self.tma_copy_bytes["K"]
                        )
                        load_K(tma_bar_ptr=pipeline_Q.producer_get_barrier(producer_state_Q))
                        load_Q(first_m_block, producer_state=producer_state_Q)
                        load_LSE(first_m_block, producer_state=producer_state_Q)
                        # When Q and dO share a stage count, reuse Q's state so
                        # both pipelines stay in lockstep.
                        producer_state_dO_cur = (
                            producer_state_dO
                            if const_expr(self.Q_stage != self.dO_stage)
                            else producer_state_Q
                        )
                        pipeline_dO.producer_acquire(
                            producer_state_dO_cur, extra_tx_count=self.tma_copy_bytes["V"]
                        )
                        load_V(tma_bar_ptr=pipeline_dO.producer_get_barrier(producer_state_dO_cur))
                        load_dO(first_m_block, producer_state=producer_state_dO_cur)
                        load_dPsum(first_m_block, producer_state=producer_state_dO_cur)
                        producer_state_Q.advance()
                        producer_state_dO.advance()
                        # Steady state: stream the remaining m_blocks.
                        for m_block in cutlass.range(m_block_min + 1, m_block_max, unroll=1):
                            pipeline_Q.producer_acquire(producer_state_Q)
                            load_Q(m_block, producer_state=producer_state_Q)
                            load_LSE(m_block, producer_state=producer_state_Q)
                            producer_state_dO_cur = (
                                producer_state_dO
                                if const_expr(self.Q_stage != self.dO_stage)
                                else producer_state_Q
                            )
                            pipeline_dO.producer_acquire(producer_state_dO_cur)
                            load_dO(m_block, producer_state=producer_state_dO_cur)
                            load_dPsum(m_block, producer_state=producer_state_dO_cur)
                            producer_state_Q.advance()
                            producer_state_dO.advance()
                    else:
                        # Block-sparse path: helper walks only the active m_blocks.
                        producer_state_Q, producer_state_dO = produce_block_sparse_q_loads_bwd_sm90(
                            blocksparse_tensors,
                            batch_idx,
                            head_idx,
                            n_block,
                            producer_state_Q,
                            producer_state_dO,
                            pipeline_Q,
                            pipeline_dO,
                            load_K,
                            load_V,
                            load_Q,
                            load_dO,
                            load_LSE,
                            load_dPsum,
                            self.tma_copy_bytes["K"],
                            self.tma_copy_bytes["V"],
                            Q_stage_eq_dO_stage=(self.Q_stage == self.dO_stage),
                            subtile_factor=self.subtile_factor,
                            m_block_max=m_block_max,
                        )
                tile_scheduler.prefetch_next_work()
                tile_scheduler.advance_to_next_work()
                work_tile = tile_scheduler.get_current_work()
    @cute.jit
    def apply_score_mod(
        self,
        acc_S: cute.Tensor,
        thr_mma_SdP: cute.core.ThrMma,
        batch_idx,
        head_idx,
        m_block,
        n_block,
        softmax_scale,
        seqlen_info: SeqlenInfoQK,
        aux_tensors=None,
        fastdiv_mods=(None, None),
    ):
        """Apply the user score_mod to the raw S = Q @ K^T accumulator in place.

        Builds a coordinate tensor for the current tile so the mod sees global
        (q_idx, k_idx) positions, then defers to apply_score_mod_inner.
        """
        # [NOTE] SdP_swapAB: swapAB transposes the tile, so use (n, m) indexing
        cS = cute.make_identity_tensor(
            (self.tile_n, self.tile_m) if self.SdP_swapAB else (self.tile_m, self.tile_n)
        )
        # Shift tile-local coordinates to absolute sequence positions.
        cS = cute.domain_offset(
            (n_block * self.tile_n, m_block * self.tile_m)
            if self.SdP_swapAB
            else (m_block * self.tile_m, n_block * self.tile_n),
            cS,
        )
        tScS = thr_mma_SdP.partition_C(cS)
        apply_score_mod_inner(
            acc_S,
            tScS,
            self.score_mod,
            batch_idx,
            head_idx,
            softmax_scale,
            self.vec_size,
            self.qk_acc_dtype,
            aux_tensors,
            fastdiv_mods,
            seqlen_info,
            constant_q_idx=None,
            qhead_per_kvhead=self.qhead_per_kvhead,
            transpose_indices=self.SdP_swapAB,
        )
    @cute.jit
    def apply_score_mod_bwd(
        self,
        grad_tensor: cute.Tensor,
        score_tensor: cute.Tensor,
        thr_mma_SdP: cute.core.ThrMma,
        batch_idx,
        head_idx,
        m_block,
        n_block,
        softmax_scale,
        seqlen_info: SeqlenInfoQK,
        aux_tensors=None,
        fastdiv_mods=(None, None),
    ):
        """Backward of the score_mod: transform score gradients in place.

        Mirrors apply_score_mod's coordinate setup (including the SdP_swapAB
        (n, m) transposition), feeding both the incoming gradient and the
        pre-mod scores to apply_score_mod_bwd_inner.
        """
        cS = cute.make_identity_tensor(
            (self.tile_n, self.tile_m) if self.SdP_swapAB else (self.tile_m, self.tile_n)
        )
        # Shift tile-local coordinates to absolute sequence positions.
        cS = cute.domain_offset(
            (n_block * self.tile_n, m_block * self.tile_m)
            if self.SdP_swapAB
            else (m_block * self.tile_m, n_block * self.tile_n),
            cS,
        )
        tScS = thr_mma_SdP.partition_C(cS)
        apply_score_mod_bwd_inner(
            grad_tensor,
            score_tensor,
            tScS,
            self.score_mod_bwd,
            batch_idx,
            head_idx,
            softmax_scale,
            self.vec_size,
            self.qk_acc_dtype,
            aux_tensors,
            fastdiv_mods,
            seqlen_info,
            constant_q_idx=None,
            qhead_per_kvhead=self.qhead_per_kvhead,
            transpose_indices=self.SdP_swapAB,
        )
    @cute.jit
    def mma(
        self,
        tiled_mma_SdP: cute.TiledMma,
        tiled_mma_dK: cute.TiledMma,
        tiled_mma_dV: cute.TiledMma,
        tiled_mma_dQ: cute.TiledMma,
        mdK: cute.Tensor,
        mdV: cute.Tensor,
        mdQaccum: cute.Tensor,
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sV: cute.Tensor,
        sdO: cute.Tensor,
        sP: Optional[cute.Tensor],
        sdS: cute.Tensor,
        sLSE: cute.Tensor,
        sdPsum: cute.Tensor,
        sdQaccum: cute.Tensor,
        pipeline_Q: cutlass.pipeline.PipelineAsync,
        pipeline_dO: cutlass.pipeline.PipelineAsync,
        tidx: Int32,
        tma_atom_dK: cute.CopyAtom,
        tma_atom_dV: cute.CopyAtom,
        r2s_tiled_copy_dQaccum: cute.TiledCopy,
        softmax_scale_log2: Float32,
        softmax_scale: Float32,
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        AttentionMaskCls: Callable,
        TileSchedulerCls: Callable,
        aux_tensors: Optional[list] = None,
        fastdiv_mods=(None, None),
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
        qhead_per_kvhead_divmod: Optional[FastDivmodDivisor] = None,
    ):
        """MMA consumer loop: accumulate dK/dV over all m_blocks of a KV tile.

        Sets up the partitioned operands for the four GEMMs (S, dP, dK/dV,
        dQ), iterates the m_blocks via mma_one_m_block, then runs the dK/dV
        epilogue.  tidx has already been shifted past the producer warp group.
        """
        warp_group_idx = cute.arch.make_warp_uniform(tidx // self.num_threads_per_warp_group)
        warp_group_thread_layout = cute.make_layout(
            self.num_mma_warp_groups, stride=self.num_threads_per_warp_group
        )
        thr_mma_SdP = tiled_mma_SdP.get_slice(tidx)
        wg_mma_SdP = tiled_mma_SdP.get_slice(warp_group_thread_layout(warp_group_idx))
        wg_mma_dK = tiled_mma_dK.get_slice(warp_group_thread_layout(warp_group_idx))
        wg_mma_dV = tiled_mma_dV.get_slice(warp_group_thread_layout(warp_group_idx))
        wg_mma_dQ = tiled_mma_dQ.get_slice(warp_group_thread_layout(warp_group_idx))
        # S = Q @ K.T
        shape_mnk_S = (self.tile_m, self.tile_n, self.tile_hdim)
        _, tSrQ, tSrK = sm90_utils.partition_fragment_ABC(
            wg_mma_SdP, shape_mnk_S, sQ, sK, swap_AB=self.SdP_swapAB
        )
        mma_qk_fn = partial(
            gemm_zero_init, tiled_mma_SdP, shape_mnk_S[:2], tSrQ, tSrK, swap_AB=self.SdP_swapAB
        )
        # dP = dO @ V.T
        shape_mnk_dP = (self.tile_m, self.tile_n, self.tile_hdimv)
        _, tdPrdO, tdPrV = sm90_utils.partition_fragment_ABC(
            wg_mma_SdP, shape_mnk_dP, sdO, sV, swap_AB=self.SdP_swapAB
        )
        mma_dov_fn = partial(
            gemm_zero_init, tiled_mma_SdP, shape_mnk_dP[:2], tdPrdO, tdPrV, swap_AB=self.SdP_swapAB
        )
        # dV += P.T @ dO
        sPt = layout_utils.transpose_view(sP) if sP is not None else None
        sdOt = layout_utils.transpose_view(sdO)
        shape_mnk_dV = (self.tile_n, self.tile_hdimv, self.tile_m)
        acc_dV, tdVrPt, tdVrdOt = sm90_utils.partition_fragment_ABC(
            wg_mma_dV, shape_mnk_dV, sPt, sdOt, swap_AB=self.dKV_swapAB
        )
        # RS variant: A (P) comes straight from registers, so tCrA is supplied later.
        if const_expr(not self.mma_dkv_is_rs):
            mma_pdo_fn = partial(
                gemm_w_idx, tiled_mma_dV, acc_dV, tdVrPt, tdVrdOt, swap_AB=self.dKV_swapAB
            )
        else:
            mma_pdo_fn = partial(gemm_w_idx, tiled_mma_dV, acc_dV, tCrB=tdVrdOt)
        # dK += dS.T @ Q
        sdSt = layout_utils.transpose_view(sdS)
        sQt = layout_utils.transpose_view(sQ)
        shape_mnk_dK = (self.tile_n, self.tile_hdim, self.tile_m)
        acc_dK, tdKrdSt, tdKrQt = sm90_utils.partition_fragment_ABC(
            wg_mma_dK, shape_mnk_dK, sdSt, sQt, swap_AB=self.dKV_swapAB
        )
        if const_expr(not self.mma_dkv_is_rs):
            mma_dsq_fn = partial(
                gemm_w_idx, tiled_mma_dK, acc_dK, tdKrdSt, tdKrQt, swap_AB=self.dKV_swapAB
            )
        else:
            mma_dsq_fn = partial(gemm_w_idx, tiled_mma_dK, acc_dK, tCrB=tdKrQt)
        # dQ = dS @ K
        sKt = layout_utils.transpose_view(sK)
        shape_mnk_dQ = (self.tile_m, self.tile_hdim, self.tile_n)
        _, tdQrdS, tdQrKt = sm90_utils.partition_fragment_ABC(
            wg_mma_dQ, shape_mnk_dQ, sdS, sKt, swap_AB=self.dQ_swapAB
        )
        mma_dsk_fn = partial(
            gemm_zero_init, tiled_mma_dQ, shape_mnk_dQ[:2], tdQrdS, tdQrKt, swap_AB=self.dQ_swapAB
        )
        # Smem copy atom tiling
        copy_P_r2s = None
        if const_expr(sP is not None):
            sP_cpy = sP if const_expr(not self.SdP_swapAB) else sPt
            copy_P_r2s, _, _ = copy_utils.get_smem_store_C(
                tiled_mma_SdP, sP_cpy, tidx, self.arch, transpose=self.SdP_swapAB
            )
        sdS_cpy = sdS if const_expr(not self.SdP_swapAB) else sdSt
        copy_dS_r2s, _, _ = copy_utils.get_smem_store_C(
            tiled_mma_SdP, sdS_cpy, tidx, self.arch, transpose=self.SdP_swapAB
        )
        # Per-thread views of the LSE / dPsum row vectors matching the C layout.
        tLSEsLSE = layout_utils.mma_partition_C_vec(
            sLSE, thr_mma_SdP, expand_shape=self.tile_n, is_colvec=not self.SdP_swapAB
        )
        tLSEsdPsum = layout_utils.mma_partition_C_vec(
            sdPsum, thr_mma_SdP, expand_shape=self.tile_n, is_colvec=not self.SdP_swapAB
        )
        smem_thr_copy_dQaccum = r2s_tiled_copy_dQaccum.get_slice(tidx)
        tdQsdQaccum = smem_thr_copy_dQaccum.partition_D(sdQaccum)
        # Barrier ordering P/dS smem writes against their consumers.
        PdS_barrier = cutlass.pipeline.NamedBarrier(
            barrier_id=int(NamedBarrierBwd.PdS), num_threads=self.num_mma_threads
        )
        score_mod_fn = partial(
            self.apply_score_mod,
            thr_mma_SdP=thr_mma_SdP,
            softmax_scale=softmax_scale,
            aux_tensors=aux_tensors,
            fastdiv_mods=fastdiv_mods,
        )
        score_mod_bwd_fn = partial(
            self.apply_score_mod_bwd,
            thr_mma_SdP=thr_mma_SdP,
            softmax_scale=softmax_scale,
            aux_tensors=aux_tensors,
            fastdiv_mods=fastdiv_mods,
        )
        mma_one_m_block_all = partial(
            self.mma_one_m_block,
            warp_group_idx=warp_group_idx,
            mma_qk_fn=mma_qk_fn,
            mma_dov_fn=mma_dov_fn,
            mma_pdo_fn=mma_pdo_fn,
            mma_dsq_fn=mma_dsq_fn,
            mma_dsk_fn=mma_dsk_fn,
            copy_P_r2s=copy_P_r2s,
            copy_dS_r2s=copy_dS_r2s,
            pipeline_Q=pipeline_Q,
            pipeline_dO=pipeline_dO,
            tLSEsLSE=tLSEsLSE,
            tLSEsdPsum=tLSEsdPsum,
            tdQsdQaccum=tdQsdQaccum,
            softmax_scale_log2=softmax_scale_log2,
            PdS_barrier=PdS_barrier,
            # acc_dV=acc_dV,
            # acc_dK=acc_dK,
        )
        consumer_state_Q = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.Q_stage
        )
        consumer_state_dO = cutlass.pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.dO_stage
        )
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            n_block, head_idx, batch_idx, _ = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            mask = AttentionMaskCls(seqlen)
            score_mod_fn_cur = partial(
                score_mod_fn,
                batch_idx=batch_idx,
                head_idx=head_idx,
                n_block=n_block,
                seqlen_info=seqlen,
            )
            score_mod_bwd_fn_cur = partial(
                score_mod_bwd_fn,
                batch_idx=batch_idx,
                head_idx=head_idx,
                n_block=n_block,
                seqlen_info=seqlen,
            )
            m_block_min, m_block_max = block_info.get_m_block_min_max(seqlen, n_block)
            if const_expr(not self.use_block_sparsity):
                process_tile = const_expr(not self.is_local) or m_block_min < m_block_max
            else:
                total_m_block_cnt = get_total_q_block_count_bwd(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    n_block,
                    subtile_factor=self.subtile_factor,
                    m_block_max=m_block_max,
                )
                process_tile = total_m_block_cnt > Int32(0)
            if process_tile:
                if const_expr(not self.use_block_sparsity):
                    mask_fn = partial(
                        mask.apply_mask,
                        batch_idx=batch_idx,
                        head_idx=head_idx,
                        n_block=n_block,
                        thr_mma=thr_mma_SdP,
                        mask_seqlen=True,
                        mask_causal=self.is_causal,
                        mask_local=self.is_local,
                        mask_mod=self.mask_mod,
                        aux_tensors=aux_tensors,
                        fastdiv_mods=fastdiv_mods,
                    )
                    # First m_block initializes acc_dK/acc_dV; later ones accumulate.
                    dKV_accumulate = False
                    for m_block in cutlass.range(m_block_min, m_block_max, unroll=1):
                        consumer_state_Q, consumer_state_dO = mma_one_m_block_all(
                            m_block,
                            consumer_state_Q,
                            consumer_state_dO,
                            mask_fn=mask_fn,
                            score_mod_fn=score_mod_fn_cur,
                            score_mod_bwd_fn=score_mod_bwd_fn_cur,
                            dKV_accumulate=dKV_accumulate,
                        )
                        dKV_accumulate = True
                else:
                    consumer_state_Q, consumer_state_dO = consume_block_sparse_mma_bwd_sm90(
                        blocksparse_tensors,
                        batch_idx,
                        head_idx,
                        n_block,
                        consumer_state_Q,
                        consumer_state_dO,
                        mma_one_m_block_all,
                        mask,
                        self.mask_mod,
                        is_causal=self.is_causal,
                        is_local=self.is_local,
                        thr_mma_SdP=thr_mma_SdP,
                        score_mod_fn=score_mod_fn_cur,
                        score_mod_bwd_fn=score_mod_bwd_fn_cur,
                        subtile_factor=self.subtile_factor,
                        m_block_max=m_block_max,
                    )
            if const_expr(self.qhead_per_kvhead == 1):
                # Fold the softmax scale into dK once, after accumulation.
                acc_dK.store(acc_dK.load() * softmax_scale)
                self.epilogue_dKV(
                    acc_dV,
                    mdV,
                    sV,
                    acc_dK,
                    mdK,
                    sK,
                    seqlen,
                    tma_atom_dK,
                    tma_atom_dV,
                    tiled_mma_dK,
                    tiled_mma_dV,
                    tidx,
                    n_block,
                    head_idx,
                    batch_idx,
                    qhead_per_kvhead_divmod,
                )
            else:
                # Block sparsity: KV tile with zero Q blocks produces no dK/dV; write zeros.
                if const_expr(self.use_block_sparsity):
                    acc_dK.fill(0.0)
                    acc_dV.fill(0.0)
                self.epilogue_dKV(
                    acc_dV,
                    mdV,
                    sV,
                    acc_dK,
                    mdK,
                    sK,
                    seqlen,
                    tma_atom_dK,
                    tma_atom_dV,
                    tiled_mma_dK,
                    tiled_mma_dV,
                    tidx,
                    n_block,
                    head_idx,
                    batch_idx,
                    qhead_per_kvhead_divmod,
                )
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        # Drain outstanding bulk-copy writes before exit (single warp suffices).
        if warp_idx == 4:
            cute.arch.cp_async_bulk_wait_group(0, read=True)
    @cute.jit
    def mma_one_m_block(
        self,
        m_block: Int32,
        consumer_state_Q: cutlass.pipeline.PipelineState | pipeline.PipelineStateSimple,
        consumer_state_dO: cutlass.pipeline.PipelineState | pipeline.PipelineStateSimple,
        warp_group_idx: Int32,
        mma_qk_fn: Callable,
        mma_dov_fn: Callable,
        mma_pdo_fn: Callable,
        mma_dsq_fn: Callable,
        mma_dsk_fn: Callable,
        copy_P_r2s: Optional[Callable],
        copy_dS_r2s: Callable,
        pipeline_Q: cutlass.pipeline.PipelineAsync,
        pipeline_dO: cutlass.pipeline.PipelineAsync,
        tLSEsLSE: cute.Tensor,
        tLSEsdPsum: cute.Tensor,
        tdQsdQaccum: cute.Tensor,
        softmax_scale_log2: Float32,
        PdS_barrier: cutlass.pipeline.NamedBarrier,
        mask_fn: Optional[Callable] = None,
        score_mod_fn: Optional[Callable] = None,
        score_mod_bwd_fn: Optional[Callable] = None,
        dKV_accumulate: Boolean = True,
    ):
        """Process one m-block of the FlashAttention backward pass.

        Runs the full per-m-block GEMM/pointwise chain, numbered (1)-(7) in the
        body: S = Q @ K^T, dP = dO @ V^T, P = exp2(S*scale - LSE),
        dS = P * (dP - dPsum), dV += P^T @ dO, dQ = dS @ K (staged to smem for a
        separate reduction), and dK += dS^T @ Q. Q and dO tiles are consumed
        from async pipelines and released once the MMAs that read them are done;
        P/dS reuse shared-memory staging guarded by ``PdS_barrier``.

        :param m_block: index of the q-block (m tile) to process.
        :param consumer_state_Q / consumer_state_dO: pipeline consumer states for
            the Q and dO smem stages; both are advanced and returned.
        :param warp_group_idx: offset added to the dQ empty/full named-barrier ids
            so each MMA warp group synchronizes with its own dQ-store warp.
        :param mma_*_fn: pre-bound tiled-MMA launchers for the five GEMMs.
        :param copy_P_r2s / copy_dS_r2s: register->smem stores for P and dS
            (``copy_P_r2s`` is unused, and may be None, when ``mma_dkv_is_rs``).
        :param tLSEsLSE / tLSEsdPsum: per-row LSE and dPsum staged in smem.
        :param tdQsdQaccum: smem staging for the dQ accumulator handed to the
            dQaccum store warp.
        :param softmax_scale_log2: softmax scale pre-multiplied by log2(e), so
            exp2 can be used instead of exp.
        :param mask_fn / score_mod_fn / score_mod_bwd_fn: optional callbacks
            applied to the score / score-gradient accumulators.
        :param dKV_accumulate: when False, the dK/dV GEMMs zero-init instead of
            accumulating (used for the first m-block of a KV tile).
        :return: the advanced ``(consumer_state_Q, consumer_state_dO)`` pair.
        """
        # When Q and dO share the same stage count, their pipelines advance in
        # lockstep; otherwise track the dO stage via the Q consumer state.
        consumer_state_dO_cur = (
            consumer_state_dO if const_expr(self.Q_stage == self.dO_stage) else consumer_state_Q
        )
        smem_idx_Q = consumer_state_Q.index
        smem_idx_dO = consumer_state_dO_cur.index if const_expr(self.dO_stage > 1) else 0
        smem_idx_PdS = smem_idx_Q if const_expr(self.PdS_stage > 1) else 0
        # (1) [GEMM 1] S = Q @ K^T
        pipeline_Q.consumer_wait(consumer_state_Q, pipeline_Q.consumer_try_wait(consumer_state_Q))
        acc_S = mma_qk_fn(A_idx=smem_idx_Q, wg_wait=-1)
        tLSErLSE = copy_utils.load_s2r(tLSEsLSE[None, smem_idx_Q])
        # (2) [GEMM 2] dP = dO @ V.T
        pipeline_dO.consumer_wait(
            consumer_state_dO_cur, pipeline_dO.consumer_try_wait(consumer_state_dO_cur)
        )
        acc_dP = mma_dov_fn(A_idx=smem_idx_Q, wg_wait=1)
        if const_expr(self.score_mod_bwd is not None):
            # Keep a copy of the pre-modification scores; the bwd score-mod
            # callback needs them to compute its gradient contribution.
            acc_S_pre = cute.make_fragment_like(acc_S)
            cute.autovec_copy(acc_S, acc_S_pre)
        if const_expr(self.score_mod is not None):
            score_mod_fn(acc_S, m_block=m_block)
        # (3) [Pointwise 1] P = exp(S - LSE)
        if cutlass.const_expr(mask_fn is not None):
            mask_fn(acc_S, m_block=m_block)
        acc_S_mn = layout_utils.reshape_acc_to_mn(acc_S, transpose=self.SdP_swapAB)
        for r in cutlass.range_constexpr(cute.size(acc_S_mn, mode=[0])):
            for c in cutlass.range(cute.size(acc_S_mn, mode=[1]), unroll_full=True):
                acc_S_mn[r, c] = cute.math.exp2(
                    acc_S_mn[r, c] * softmax_scale_log2 - tLSErLSE[r], fastmath=True
                )
        tLSErdPsum = copy_utils.load_s2r(tLSEsdPsum[None, smem_idx_dO])
        # Convert P from f32 -> f16
        tdVrP = utils.cvt_f16(layout_utils.reshape_acc_to_frgA(acc_S), self.dtype)
        # R2S for P
        if const_expr(not self.mma_dkv_is_rs):
            # sync to ensure P has already been used in the previous iteration before overwriting
            if const_expr(self.PdS_stage == 1):
                PdS_barrier.arrive_and_wait()
            copy_P_r2s(tdVrP, dst_idx=smem_idx_PdS)
        # (4) [Pointwise 2] dS = P*(dP-dPsum)
        warpgroup.wait_group(0)
        acc_dP_mn = layout_utils.reshape_acc_to_mn(acc_dP, transpose=self.SdP_swapAB)
        for r in cutlass.range_constexpr(cute.size(acc_dP_mn, mode=[0])):
            for c in cutlass.range(cute.size(acc_dP_mn, mode=[1]), unroll_full=True):
                acc_dP_mn[r, c] = acc_S_mn[r, c] * (acc_dP_mn[r, c] - tLSErdPsum[r])
        if const_expr(self.score_mod_bwd is not None):
            score_mod_bwd_fn(acc_dP, acc_S_pre, m_block=m_block)
        # Convert dS from f32 -> f16
        tdKrdS = utils.cvt_f16(layout_utils.reshape_acc_to_frgA(acc_dP), self.dtype)
        # If there's double buffering on dS, we don't need to sync here.
        # Otherwise we might have WG1 writing to dS before WG2 is done reading from it during MmadQ.
        # But because both WGs have to sync at the end of the loop and double buffering,
        # this race condition is not possible.
        # This sync is to ensure (1) P is written in case of !mma_dkv_is_rs and
        # (2) dS is already read by the Mma in the previous iteration in case of mma_dkv_is_rs.
        if const_expr(not self.mma_dkv_is_rs or (self.PdS_stage == 1 and self.mma_dkv_is_rs)):
            cute.arch.fence_view_async_shared()
            PdS_barrier.arrive_and_wait()
        # R2S for dS
        copy_dS_r2s(tdKrdS, dst_idx=smem_idx_PdS)
        # (5) [GEMM 3] dV += P.T @ dO
        if const_expr(not self.mma_dkv_is_rs):
            mma_pdo_fn(
                A_idx=smem_idx_PdS, B_idx=smem_idx_dO, zero_init=not dKV_accumulate, wg_wait=-1
            )
        else:
            mma_pdo_fn(tCrA=tdVrP, B_idx=smem_idx_dO, zero_init=not dKV_accumulate, wg_wait=-1)
        # smem fence to make sure sdS is written before it's read by WGMMA
        cute.arch.fence_view_async_shared()
        PdS_barrier.arrive_and_wait()
        # (6) [GEMM 4] dQ = dS @ K
        acc_dQ = mma_dsk_fn(A_idx=smem_idx_PdS, wg_wait=1)
        # if cute.arch.thread_idx()[0] == 128: cute.print_tensor(acc_dV)
        pipeline_dO.consumer_release(consumer_state_dO_cur)  # release dO as dV mma is done
        # (7) [GEMM 5] dK += dS.T @ Q
        if const_expr(not self.mma_dkv_is_rs):
            mma_dsq_fn(
                A_idx=smem_idx_PdS, B_idx=smem_idx_Q, zero_init=not dKV_accumulate, wg_wait=1
            )
        else:
            mma_dsq_fn(tCrA=tdKrdS, B_idx=smem_idx_Q, zero_init=not dKV_accumulate, wg_wait=1)
        # if cute.arch.thread_idx()[0] == 128: cute.print_tensor(acc_dQ)
        # Hand the dQ accumulator to the dQaccum-store warp: wait for its smem
        # slot to be empty, stage the registers, fence, then signal it full.
        cute.arch.barrier(
            barrier_id=int(NamedBarrierBwd.dQEmptyWG0) + warp_group_idx,
            number_of_threads=self.num_threads_per_warp_group + cute.arch.WARP_SIZE,
        )
        tdQrdQaccum_flat = cute.make_tensor(acc_dQ.iterator, cute.make_layout(tdQsdQaccum.shape))
        cute.autovec_copy(tdQrdQaccum_flat, tdQsdQaccum)
        cute.arch.fence_view_async_shared()
        cute.arch.barrier_arrive(
            barrier_id=int(NamedBarrierBwd.dQFullWG0) + warp_group_idx,
            number_of_threads=self.num_threads_per_warp_group + cute.arch.WARP_SIZE,
        )
        warpgroup.wait_group(0)
        # if cute.arch.thread_idx()[0] == 128: cute.print_tensor(acc_dK)
        pipeline_Q.consumer_release(consumer_state_Q)
        # if cute.arch.thread_idx()[0] % 32 == 0: cute.printf("tidx = {}, m_block = {}, after pipeline_Q consumer release", cute.arch.thread_idx()[0], m_block)
        consumer_state_Q.advance()
        consumer_state_dO.advance()
        return consumer_state_Q, consumer_state_dO
    @cute.jit
    def epilogue_dKV(
        self,
        acc_dV: cute.Tensor,
        mdV: cute.Tensor,
        sV: cute.Tensor,
        acc_dK: cute.Tensor,
        mdK: cute.Tensor,
        sK: cute.Tensor,
        seqlen: SeqlenInfoQK,
        tma_atom_dK: cute.CopyAtom,
        tma_atom_dV: cute.CopyAtom,
        tiled_mma_dK: cute.TiledMma,
        tiled_mma_dV: cute.TiledMma,
        tidx: Int32,
        n_block: Int32,
        head_idx: Int32,
        batch_idx: Int32,
        qhead_per_kvhead_divmod: Optional[FastDivmodDivisor] = None,
    ):
        """Write the accumulated dK/dV for one KV tile back to global memory.

        Two paths:
        * ``qhead_per_kvhead == 1`` (no GQA sharing): convert the f32
          accumulators to the output dtype via register->smem stores (reusing
          the sK/sV smem buffers), then TMA-store the tiles to gdK/gdV.
        * otherwise (GQA): multiple query heads reduce into the same KV head,
          so the f32 accumulators are staged to smem and added into the global
          dK/dV accumulator buffers with bulk async reduce-add, split across
          the MMA warp groups.

        Warp 4 (the first warp of the TMA/store group) issues the actual TMA /
        bulk-reduce copies; ``epi_barrier`` orders the r2s stores against them.

        :param acc_dK / acc_dV: per-thread f32 accumulators for this n_block.
        :param mdK / mdV: global output (or f32 accumulator) tensors.
        :param sK / sV: smem buffers reused as staging for the stores.
        :param qhead_per_kvhead_divmod: divisor mapping a q head to its KV head
            (only used on the GQA path).
        """
        epi_barrier = cutlass.pipeline.NamedBarrier(
            barrier_id=int(NamedBarrierBwd.Epilogue), num_threads=self.num_mma_threads
        )
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        if const_expr(self.qhead_per_kvhead == 1):
            mdV_cur = mdV[None, None, head_idx, batch_idx]
            mdK_cur = mdK[None, None, head_idx, batch_idx]
            gdK = cute.local_tile(mdK_cur, (self.tile_n, self.tile_hdim), (n_block, 0))
            gdV = cute.local_tile(mdV_cur, (self.tile_n, self.tile_hdimv), (n_block, 0))
            store_dK, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_dK, 0, cute.make_layout(1), sK, gdK, single_stage=True
            )
            store_dV, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_dV, 0, cute.make_layout(1), sV, gdV, single_stage=True
            )
            # If the MMA had A/B swapped, the accumulator is transposed; view
            # smem transposed so the r2s store lands in the right layout.
            sdV = sV if const_expr(not self.dKV_swapAB) else layout_utils.transpose_view(sV)
            sdK = sK if const_expr(not self.dKV_swapAB) else layout_utils.transpose_view(sK)
            copy_dV_r2s, _, _ = copy_utils.get_smem_store_C(
                tiled_mma_dV, sdV, tidx, self.arch, transpose=self.dKV_swapAB
            )
            copy_dK_r2s, _, _ = copy_utils.get_smem_store_C(
                tiled_mma_dK, sdK, tidx, self.arch, transpose=self.dKV_swapAB
            )
            # Wait until prior bulk copies reading sV are done before overwriting it.
            cute.arch.cp_async_bulk_wait_group(1, read=True)
            epi_barrier.arrive_and_wait()
            copy_dV_r2s(acc_dV, dst_idx=None)
            cute.arch.fence_view_async_shared()
            epi_barrier.arrive_and_wait()
            if warp_idx == 4:
                store_dV()
                cute.arch.cp_async_bulk_commit_group()
            cute.arch.cp_async_bulk_wait_group(1, read=True)
            epi_barrier.arrive_and_wait()
            copy_dK_r2s(acc_dK, dst_idx=None)
            cute.arch.fence_view_async_shared()
            epi_barrier.arrive_and_wait()
            if warp_idx == 4:
                store_dK()
                cute.arch.cp_async_bulk_commit_group()
        else:
            # GQA path: stage flat f32 accumulators per warp group and reduce-add
            # into the global dK/dV accumulators of the shared KV head.
            sdKaccum_shape0 = self.tile_n * self.tile_hdim // self.num_mma_warp_groups
            sdVaccum_shape0 = self.tile_n * self.tile_hdimv // self.num_mma_warp_groups
            sdKaccum_layout = cute.make_layout((sdKaccum_shape0, self.num_mma_warp_groups))
            sdVaccum_layout = cute.make_layout((sdVaccum_shape0, self.num_mma_warp_groups))
            head_idx_kv = head_idx // qhead_per_kvhead_divmod
            mdKaccum_cur = mdK[None, head_idx_kv, batch_idx]
            gdKaccum_ = cute.local_tile(mdKaccum_cur, (self.tile_n * self.tile_hdim,), (n_block,))
            gdKaccum = cute.flat_divide(gdKaccum_, (sdKaccum_shape0,))
            mdVaccum_cur = mdV[None, head_idx_kv, batch_idx]
            gdVaccum_ = cute.local_tile(mdVaccum_cur, (self.tile_n * self.tile_hdimv,), (n_block,))
            gdVaccum = cute.flat_divide(gdVaccum_, (sdVaccum_shape0,))
            # These two overlap each other
            sVaccum_ptr = cute.recast_ptr(sV.iterator, dtype=Float32)
            sdKaccum = cute.make_tensor(sVaccum_ptr, sdKaccum_layout)
            sdVaccum = cute.make_tensor(sVaccum_ptr, sdVaccum_layout)
            tiled_copy_dKVaccum_r2s = cute.make_tiled_copy_tv(
                cute.make_copy_atom(cute.nvgpu.CopyUniversalOp(), Float32, num_bits_per_copy=128),
                cute.make_layout((self.num_threads_per_warp_group, self.num_mma_warp_groups)),
                cute.make_layout(128 // Float32.width),
            )
            thr_copy_dKVaccum_r2s = tiled_copy_dKVaccum_r2s.get_slice(tidx)
            tdKsdKaccum = thr_copy_dKVaccum_r2s.partition_D(sdKaccum)
            tdVsdVaccum = thr_copy_dKVaccum_r2s.partition_D(sdVaccum)
            cute.arch.cp_async_bulk_wait_group(0, read=True)
            epi_barrier.arrive_and_wait()
            tdKrdKaccum_flat = cute.make_tensor(acc_dK.iterator, tdKsdKaccum.shape)
            cute.autovec_copy(tdKrdKaccum_flat, tdKsdKaccum)
            cute.arch.fence_view_async_shared()
            epi_barrier.arrive_and_wait()
            if warp_idx == 4:
                with cute.arch.elect_one():
                    for wg_idx in cutlass.range_constexpr(self.num_mma_warp_groups):
                        copy_utils.cpasync_reduce_bulk_add_f32(
                            sdKaccum[None, wg_idx].iterator,
                            gdKaccum[None, wg_idx].iterator,
                            self.tma_copy_bytes["dKacc"] // self.num_mma_warp_groups,
                        )
                cute.arch.cp_async_bulk_commit_group()
            cute.arch.cp_async_bulk_wait_group(0, read=True)
            epi_barrier.arrive_and_wait()
            tdVrdVaccum_flat = cute.make_tensor(acc_dV.iterator, tdVsdVaccum.shape)
            cute.autovec_copy(tdVrdVaccum_flat, tdVsdVaccum)
            cute.arch.fence_view_async_shared()
            epi_barrier.arrive_and_wait()
            if warp_idx == 4:
                with cute.arch.elect_one():
                    for wg_idx in cutlass.range_constexpr(self.num_mma_warp_groups):
                        copy_utils.cpasync_reduce_bulk_add_f32(
                            sdVaccum[None, wg_idx].iterator,
                            gdVaccum[None, wg_idx].iterator,
                            self.tma_copy_bytes["dVacc"] // self.num_mma_warp_groups,
                        )
                cute.arch.cp_async_bulk_commit_group()
    @cute.jit
    def dQaccum_store(
        self,
        mdQaccum: cute.Tensor,
        sdQaccum: cute.Tensor,
        block_info: BlockInfo,
        TileSchedulerCls: cutlass.Constexpr[Callable],
        SeqlenInfoCls: cutlass.Constexpr[Callable],
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
    ):
        """Persistent loop for the dQ-accumulator store warp.

        Mirrors the MMA warp groups' tile iteration: for every work tile and
        every m_block, it waits for each MMA warp group to publish its dQ
        fragment to smem (dQFull barrier), bulk reduce-adds it into the global
        f32 dQ accumulator, then signals the smem slot free again (dQEmpty
        barrier). Under block sparsity the per-tile iteration is delegated to
        ``dQaccum_store_block_sparse_bwd_sm90`` so skipped m_blocks stay in
        sync with the MMA side.

        :param mdQaccum: global f32 dQ accumulator, indexed (flat m*K, head, batch).
        :param sdQaccum: smem staging buffer, one column per MMA warp group.
        :param block_info: provides the m_block range for each (n_block, seqlen).
        :param TileSchedulerCls / SeqlenInfoCls: constexpr factories shared with
            the main kernel so both sides visit tiles in the same order.
        """
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            n_block, head_idx, batch_idx, _ = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            mdQaccum_cur = mdQaccum[None, head_idx, batch_idx]
            gdQaccum_ = cute.local_tile(mdQaccum_cur, (self.tile_m * self.tile_hdim,), (None,))
            # (M * K / WG, WG, _)
            gdQaccum = cute.flat_divide(
                gdQaccum_, (self.tile_m * self.tile_hdim // self.num_mma_warp_groups,)
            )
            m_block_min, m_block_max = block_info.get_m_block_min_max(seqlen, n_block)
            if const_expr(not self.use_block_sparsity):
                # With local attention the m-range can be empty; skip such tiles.
                process_tile = const_expr(not self.is_local) or m_block_min < m_block_max
                loop_count = m_block_max - m_block_min
            else:
                total_block_cnt = get_total_q_block_count_bwd(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    n_block,
                    subtile_factor=self.subtile_factor,
                    m_block_max=m_block_max,
                )
                process_tile = total_block_cnt > Int32(0)
            if process_tile:
                if const_expr(not self.use_block_sparsity):
                    for iter_idx in cutlass.range(loop_count, unroll=1):
                        m_block = m_block_min + iter_idx
                        m_block_safe = m_block
                        for warp_group_idx in cutlass.range_constexpr(self.num_mma_warp_groups):
                            # Wait until this warp group's previous bulk copy out of
                            # sdQaccum has drained, then mark its slot empty.
                            cute.arch.cp_async_bulk_wait_group(
                                self.num_mma_warp_groups - 1 - warp_group_idx, read=True
                            )
                            cute.arch.barrier_arrive(
                                barrier_id=int(NamedBarrierBwd.dQEmptyWG0) + warp_group_idx,
                                number_of_threads=self.num_threads_per_warp_group
                                + cute.arch.WARP_SIZE,
                            )
                        for warp_group_idx in cutlass.range_constexpr(self.num_mma_warp_groups):
                            # Wait for the MMA warp group to publish dQ, then
                            # reduce-add its smem slice into the global accumulator.
                            cute.arch.barrier(
                                barrier_id=int(NamedBarrierBwd.dQFullWG0) + warp_group_idx,
                                number_of_threads=self.num_threads_per_warp_group
                                + cute.arch.WARP_SIZE,
                            )
                            with cute.arch.elect_one():
                                copy_utils.cpasync_reduce_bulk_add_f32(
                                    sdQaccum[None, warp_group_idx].iterator,
                                    gdQaccum[None, warp_group_idx, m_block_safe].iterator,
                                    self.tma_copy_bytes["dQ"],
                                )
                            cute.arch.cp_async_bulk_commit_group()
                else:
                    dQaccum_store_block_sparse_bwd_sm90(
                        blocksparse_tensors,
                        batch_idx,
                        head_idx,
                        n_block,
                        sdQaccum,
                        gdQaccum,
                        subtile_factor=self.subtile_factor,
                        m_block_max=m_block_max,
                        num_mma_warp_groups=self.num_mma_warp_groups,
                        num_threads_per_warp_group=self.num_threads_per_warp_group,
                        tma_copy_bytes_dQ=self.tma_copy_bytes["dQ"],
                    )
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
        # Drain any outstanding bulk copies before the warp exits.
        cute.arch.cp_async_bulk_wait_group(0, read=True)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_bwd_sm90.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1499,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/flash_fwd_combine.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
# A reimplementation of https://github.com/Dao-AILab/flash-attention/blob/main/hopper/flash_fwd_combine_kernel.h
# from Cutlass C++ to Cute-DSL.
import math
from typing import Type, Optional
from functools import partial
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
from cutlass.cute.nvgpu import cpasync
from cutlass import Float32, Int32, const_expr
from flash_attn.cute import utils
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
from flash_attn.cute.seqlen_info import SeqlenInfo
from cutlass.cute import FastDivmodDivisor
class FlashAttentionForwardCombine:
    def __init__(
        self,
        dtype: Type[cutlass.Numeric],
        dtype_partial: Type[cutlass.Numeric],
        head_dim: int,
        m_block_size: int = 8,
        k_block_size: int = 64,
        log_max_splits: int = 4,
        num_threads: int = 256,
        stages: int = 4,
    ):
        """
        Forward combine kernel for split attention computation.
        :param dtype: output data type
        :param dtype_partial: partial accumulation data type
        :param head_dim: head dimension
        :param m_block_size: m block size (rows combined per CTA)
        :param k_block_size: k block size (head-dim columns per CTA)
        :param log_max_splits: log2 of maximum splits
        :param num_threads: number of threads per CTA
        :param stages: number of smem pipeline stages for O-partial loads
        """
        self.dtype = dtype
        self.dtype_partial = dtype_partial
        self.head_dim = head_dim
        self.m_block_size = m_block_size
        self.k_block_size = k_block_size
        # Maximum number of splits the kernel can combine: 2 ** log_max_splits.
        self.max_splits = 1 << log_max_splits
        self.num_threads = num_threads
        # True when head_dim divides evenly into k tiles, so stores along the
        # head dimension need no predication.
        self.is_even_k = head_dim % k_block_size == 0
        self.stages = stages
@staticmethod
def can_implement(
dtype,
dtype_partial,
head_dim,
m_block_size,
k_block_size,
log_max_splits,
num_threads,
) -> bool:
"""Check if the kernel can be implemented with the given parameters."""
if dtype not in [cutlass.Float16, cutlass.BFloat16, cutlass.Float32]:
return False
if dtype_partial not in [cutlass.Float16, cutlass.BFloat16, Float32]:
return False
if head_dim % 8 != 0:
return False
if num_threads % 32 != 0:
return False
if m_block_size % 8 != 0:
return False
max_splits = 1 << log_max_splits
if max_splits > 256:
return False
if (m_block_size * max_splits) % num_threads != 0:
return False
return True
    def _setup_attributes(self):
        """Build the tiled-copy atoms and shared-memory layouts used by the kernel.

        Populates: ``gmem_tiled_copy_O_partial`` (async g2s loads of O partial),
        ``gmem_tiled_copy_O`` (universal stores of the final O),
        ``gmem_tiled_copy_LSE`` (async per-element LSE loads),
        ``s2r_tiled_copy_LSE`` / ``smem_threads_per_col_lse`` (smem->register
        LSE transpose), ``smem_layout_lse`` (swizzled, bank-conflict free) and
        ``smem_layout_o`` (staged O-partial buffer).
        """
        # GMEM copy setup for O partial
        universal_copy_bits = 128
        async_copy_elems = universal_copy_bits // self.dtype_partial.width
        assert self.k_block_size % async_copy_elems == 0
        # Widest power-of-two row width (<=128) that divides the k tile.
        k_block_gmem = (
            128 if self.k_block_size % 128 == 0 else (64 if self.k_block_size % 64 == 0 else 32)
        )
        gmem_threads_per_row = k_block_gmem // async_copy_elems
        assert self.num_threads % gmem_threads_per_row == 0
        # Async copy atom for O partial load
        atom_async_copy_partial = cute.make_copy_atom(
            cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.GLOBAL),
            self.dtype_partial,
            num_bits_per_copy=universal_copy_bits,
        )
        tOpartial_layout = cute.make_ordered_layout(
            (self.num_threads // gmem_threads_per_row, gmem_threads_per_row),
            order=(1, 0),
        )
        vOpartial_layout = cute.make_layout((1, async_copy_elems))  # 4 vals per load
        self.gmem_tiled_copy_O_partial = cute.make_tiled_copy_tv(
            atom_async_copy_partial, tOpartial_layout, vOpartial_layout
        )
        # GMEM copy setup for final O (use universal copy for store)
        atom_universal_copy = cute.make_copy_atom(
            cute.nvgpu.CopyUniversalOp(),
            self.dtype,
            num_bits_per_copy=async_copy_elems * self.dtype.width,
        )
        self.gmem_tiled_copy_O = cute.make_tiled_copy_tv(
            atom_universal_copy,
            tOpartial_layout,
            vOpartial_layout,  # 4 vals per store
        )
        # LSE copy setup with async copy (alignment = 1)
        lse_copy_bits = Float32.width  # 1 element per copy, width is in bits
        # Largest power-of-two (8..128) that divides the m tile.
        m_block_smem = (
            128
            if self.m_block_size % 128 == 0
            else (
                64
                if self.m_block_size % 64 == 0
                else (
                    32
                    if self.m_block_size % 32 == 0
                    else (16 if self.m_block_size % 16 == 0 else 8)
                )
            )
        )
        gmem_threads_per_row_lse = m_block_smem
        assert self.num_threads % gmem_threads_per_row_lse == 0
        # Async copy atom for LSE load
        atom_async_copy_lse = cute.make_copy_atom(
            cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.ALWAYS),
            Float32,
            num_bits_per_copy=lse_copy_bits,
        )
        tLSE_layout = cute.make_ordered_layout(
            (self.num_threads // gmem_threads_per_row_lse, gmem_threads_per_row_lse),
            order=(1, 0),
        )
        vLSE_layout = cute.make_layout(1)
        self.gmem_tiled_copy_LSE = cute.make_tiled_copy_tv(
            atom_async_copy_lse, tLSE_layout, vLSE_layout
        )
        # ///////////////////////////////////////////////////////////////////////////////
        # Shared memory
        # ///////////////////////////////////////////////////////////////////////////////
        # Shared memory to register copy for LSE
        self.smem_threads_per_col_lse = self.num_threads // m_block_smem
        assert 32 % self.smem_threads_per_col_lse == 0  # Must divide warp size
        s2r_layout_atom_lse = cute.make_ordered_layout(
            (self.smem_threads_per_col_lse, self.num_threads // self.smem_threads_per_col_lse),
            order=(0, 1),
        )
        self.s2r_tiled_copy_LSE = cute.make_tiled_copy_tv(
            cute.make_copy_atom(cute.nvgpu.CopyUniversalOp(), Float32),
            s2r_layout_atom_lse,
            cute.make_layout(1),
        )
        # LSE shared memory layout with swizzling to avoid bank conflicts
        # This works for kBlockMSmem = 8, 16, 32, 64, 128, no bank conflicts
        if const_expr(m_block_smem == 8):
            smem_lse_swizzle = cute.make_swizzle(5, 0, 5)
        elif const_expr(m_block_smem == 16):
            smem_lse_swizzle = cute.make_swizzle(4, 0, 4)
        else:
            smem_lse_swizzle = cute.make_swizzle(3, 2, 3)
        smem_layout_atom_lse = cute.make_composed_layout(
            smem_lse_swizzle, 0, cute.make_ordered_layout((8, m_block_smem), order=(1, 0))
        )
        self.smem_layout_lse = cute.tile_to_shape(
            smem_layout_atom_lse, (self.max_splits, self.m_block_size), (0, 1)
        )
        # O partial shared memory layout (simple layout for pipeline stages)
        self.smem_layout_o = cute.make_ordered_layout(
            (self.m_block_size, self.k_block_size, self.stages), order=(1, 0, 2)
        )
    @cute.jit
    def __call__(
        self,
        mO_partial: cute.Tensor,
        mLSE_partial: cute.Tensor,
        mO: cute.Tensor,
        mLSE: Optional[cute.Tensor] = None,
        cu_seqlens: Optional[cute.Tensor] = None,
        seqused: Optional[cute.Tensor] = None,
        num_splits_dynamic_ptr: Optional[cute.Tensor] = None,
        semaphore_to_reset: Optional[cute.Tensor] = None,
        stream: cuda.CUstream = None,
    ):
        """Validate inputs, permute layouts into kernel order, and launch.

        User-facing tensor layouts (split-major) are permuted to the kernel's
        internal order (seqlen-major), shared-memory requirements are computed,
        and the grid is sized as
        (ceil_div(seqlen*nheads, m_block), ceil_div(head_dim, k_block), batch).

        :param mO_partial: per-split partial outputs,
            (num_splits, batch, seqlen, nheads, headdim) or varlen
            (num_splits, total_q, nheads, headdim).
        :param mLSE_partial: per-split partial LSE, matching mO_partial minus
            the headdim mode.
        :param mO: final combined output.
        :param mLSE: optional final LSE output.
        :param cu_seqlens / seqused: varlen metadata; presence of either
            switches the kernel to variable-length indexing.
        :param num_splits_dynamic_ptr: optional per-batch dynamic split counts.
        :param semaphore_to_reset: optional semaphore zeroed by the last CTA.
        :param stream: CUDA stream to launch on.
        :raises TypeError: on dtype mismatches.
        :raises ValueError: on unexpected tensor ranks.
        """
        # Type checking
        if const_expr(not (mO_partial.element_type == self.dtype_partial)):
            raise TypeError("O partial tensor must match dtype_partial")
        if const_expr(not (mO.element_type == self.dtype)):
            raise TypeError("O tensor must match dtype")
        if const_expr(mLSE_partial.element_type not in [Float32]):
            raise TypeError("LSE partial tensor must be Float32")
        if const_expr(mLSE is not None and mLSE.element_type not in [Float32]):
            raise TypeError("LSE tensor must be Float32")
        # Shape validation - input tensors are in user format, need to be converted to kernel format
        if const_expr(len(mO_partial.shape) not in [4, 5]):
            raise ValueError(
                "O partial tensor must have 4 or 5 dimensions: (num_splits, batch, seqlen, nheads, headdim) or (num_splits, total_q, nheads, headdim)"
            )
        if const_expr(len(mLSE_partial.shape) not in [3, 4]):
            raise ValueError(
                "LSE partial tensor must have 3 or 4 dimensions: (num_splits, batch, seqlen, nheads) or (num_splits, total_q, nheads)"
            )
        if const_expr(len(mO.shape) not in [3, 4]):
            raise ValueError(
                "O tensor must have 3 or 4 dimensions: (batch, seqlen, nheads, headdim) or (total_q, nheads, headdim)"
            )
        if const_expr(mLSE is not None and len(mLSE.shape) not in [2, 3]):
            raise ValueError(
                "LSE tensor must have 2 or 3 dimensions: (batch, seqlen, nheads) or (total_q, nheads)"
            )
        mO_partial, mO = [assume_tensor_aligned(t) for t in (mO_partial, mO)]
        # (num_splits, b, seqlen, h, d) -> (seqlen, d, num_splits, h, b)
        # or (num_splits, total_q, h, d) -> (total_q, d, num_splits, h)
        O_partial_layout_transpose = (
            [2, 4, 0, 3, 1] if const_expr(cu_seqlens is None) else [1, 3, 0, 2]
        )
        # (b, seqlen, h, d) -> (seqlen, d, h, b) or (total_q, h, d) -> (total_q, d, h)
        mO_partial = cute.make_tensor(
            mO_partial.iterator, cute.select(mO_partial.layout, mode=O_partial_layout_transpose)
        )
        O_layout_transpose = [1, 3, 2, 0] if const_expr(cu_seqlens is None) else [0, 2, 1]
        mO = cute.make_tensor(mO.iterator, cute.select(mO.layout, mode=O_layout_transpose))
        # (num_splits, b, seqlen, h) -> (seqlen, num_splits, h, b)
        # or (num_splits, total_q, h) -> (total_q, num_splits, h)
        LSE_partial_layout_transpose = [2, 0, 3, 1] if const_expr(cu_seqlens is None) else [1, 0, 2]
        mLSE_partial = cute.make_tensor(
            mLSE_partial.iterator,
            cute.select(mLSE_partial.layout, mode=LSE_partial_layout_transpose),
        )
        # (b, seqlen, h) -> (seqlen, h, b) or (total_q, h) -> (total_q, h)
        LSE_layout_transpose = [1, 2, 0] if const_expr(cu_seqlens is None) else [0, 1]
        mLSE = (
            cute.make_tensor(mLSE.iterator, cute.select(mLSE.layout, mode=LSE_layout_transpose))
            if mLSE is not None
            else None
        )
        # Determine if we have variable length sequences
        varlen = const_expr(cu_seqlens is not None or seqused is not None)
        self._setup_attributes()
        @cute.struct
        class SharedStorage:
            sLSE: cute.struct.Align[
                cute.struct.MemRange[Float32, cute.cosize(self.smem_layout_lse)], 128
            ]
            sMaxValidSplit: cute.struct.Align[cute.struct.MemRange[Int32, self.m_block_size], 128]
            sO: cute.struct.Align[
                cute.struct.MemRange[self.dtype_partial, cute.cosize(self.smem_layout_o)], 128
            ]
        smem_size = SharedStorage.size_in_bytes()
        # Grid dimensions: (ceil_div(seqlen, m_block), ceil_div(head_dim, k_block), num_head * batch)
        seqlen = mO_partial.shape[0]
        num_head = mO_partial.shape[3]
        batch_size = (
            mO_partial.shape[4]
            if const_expr(cu_seqlens is None)
            else Int32(cu_seqlens.shape[0] - 1)
        )
        # Create FastDivmodDivisor objects for efficient division
        seqlen_divmod = FastDivmodDivisor(seqlen)
        head_divmod = FastDivmodDivisor(num_head)
        grid_dim = (
            cute.ceil_div(seqlen * num_head, self.m_block_size),
            cute.ceil_div(self.head_dim, self.k_block_size),
            batch_size,
        )
        self.kernel(
            mO_partial,
            mLSE_partial,
            mO,
            mLSE,
            cu_seqlens,
            seqused,
            num_splits_dynamic_ptr,
            semaphore_to_reset,
            SharedStorage,
            self.smem_layout_lse,
            self.smem_layout_o,
            self.gmem_tiled_copy_O_partial,
            self.gmem_tiled_copy_O,
            self.gmem_tiled_copy_LSE,
            self.s2r_tiled_copy_LSE,
            seqlen_divmod,
            head_divmod,
            varlen,
        ).launch(
            grid=grid_dim,
            block=[self.num_threads, 1, 1],
            smem=smem_size,
            stream=stream,
        )
    @cute.kernel
    def kernel(
        self,
        mO_partial: cute.Tensor,
        mLSE_partial: cute.Tensor,
        mO: cute.Tensor,
        mLSE: Optional[cute.Tensor],
        cu_seqlens: Optional[cute.Tensor],
        seqused: Optional[cute.Tensor],
        num_splits_dynamic_ptr: Optional[cute.Tensor],
        semaphore_to_reset: Optional[cute.Tensor],
        SharedStorage: cutlass.Constexpr,
        smem_layout_lse: cute.Layout | cute.ComposedLayout,
        smem_layout_o: cute.Layout,
        gmem_tiled_copy_O_partial: cute.TiledCopy,
        gmem_tiled_copy_O: cute.TiledCopy,
        gmem_tiled_copy_LSE: cute.TiledCopy,
        s2r_tiled_copy_LSE: cute.TiledCopy,
        seqlen_divmod: FastDivmodDivisor,
        head_divmod: FastDivmodDivisor,
        varlen: cutlass.Constexpr[bool],
    ):
        """Device kernel: combine per-split partial O/LSE into the final O/LSE.

        Each CTA owns one (m_block, k_block, batch) tile. Steps (labeled in the
        body): (1) load the LSE-partial slab into swizzled smem, (2) prefetch
        the first O-partial pipeline stages, (3) transpose LSE smem->registers,
        (4) reduce across splits to get the final LSE and per-split rescale
        factors, (5) optionally store the final LSE, (6) stream the remaining
        O-partial stages and accumulate scale[s] * O_partial[s], (7) convert
        and store the final O. Rows past seqlen*nheads (or with all-(-inf) LSE)
        are skipped via tOhidx[m] == -1 / zero scales.
        """
        # Thread and block indices
        tidx, _, _ = cute.arch.thread_idx()
        m_block, k_block, batch_idx = cute.arch.block_idx()
        # ///////////////////////////////////////////////////////////////////////////////
        # Get shared memory buffer
        # ///////////////////////////////////////////////////////////////////////////////
        smem = cutlass.utils.SmemAllocator()
        storage = smem.allocate(SharedStorage)
        sLSE = storage.sLSE.get_tensor(smem_layout_lse)
        sMaxValidSplit = storage.sMaxValidSplit.get_tensor((self.m_block_size,))
        sO = storage.sO.get_tensor(smem_layout_o)
        # Handle semaphore reset: the last CTA in the grid zeroes the semaphore.
        if const_expr(semaphore_to_reset is not None):
            if (
                tidx == 0
                and m_block == cute.arch.grid_dim()[0] - 1
                and k_block == cute.arch.grid_dim()[1] - 1
                and batch_idx == cute.arch.grid_dim()[2] - 1
            ):
                semaphore_to_reset[0] = 0
        # Get number of splits (per-batch dynamic count if provided)
        num_splits = (
            num_splits_dynamic_ptr[batch_idx]
            if const_expr(num_splits_dynamic_ptr is not None)
            else mLSE_partial.shape[1]
        )
        # Handle variable length sequences using SeqlenInfo
        seqlen_info = SeqlenInfo.create(
            batch_idx=batch_idx,
            seqlen_static=mO_partial.shape[0],
            cu_seqlens=cu_seqlens,
            seqused=seqused,
        )
        seqlen, offset = seqlen_info.seqlen, seqlen_info.offset
        # Extract number of heads (head index will be determined dynamically)
        num_head = mO_partial.shape[3]
        max_idx = seqlen * num_head
        # Early exit for single split if dynamic, or when the whole m tile is
        # past the (varlen) sequence end.
        if (const_expr(num_splits_dynamic_ptr is None) or num_splits > 1) and (
            const_expr(not varlen) or m_block * self.m_block_size < max_idx
        ):
            # ===============================
            # Step 1: Load LSE_partial from gmem to shared memory
            # ===============================
            if const_expr(cu_seqlens is None):
                mLSE_partial_cur = mLSE_partial[None, None, None, batch_idx]
            else:
                mLSE_partial_cur = cute.domain_offset((offset, 0, 0), mLSE_partial)
            mLSE_partial_copy = cute.tiled_divide(mLSE_partial_cur, (1,))
            gmem_thr_copy_LSE = gmem_tiled_copy_LSE.get_slice(tidx)
            tLSEsLSE = gmem_thr_copy_LSE.partition_D(sLSE)
            # Create identity tensor for coordinate tracking
            cLSE = cute.make_identity_tensor((self.max_splits, self.m_block_size))
            tLSEcLSE = gmem_thr_copy_LSE.partition_S(cLSE)
            # Load LSE partial values
            for m in cutlass.range(cute.size(tLSEcLSE, mode=[2]), unroll_full=True):
                mi = tLSEcLSE[0, 0, m][1]  # Get m coordinate
                idx = m_block * self.m_block_size + mi
                if idx < max_idx:
                    # Calculate actual sequence position and head using FastDivmodDivisor
                    if const_expr(not varlen):
                        head_idx, m_idx = divmod(idx, seqlen_divmod)
                    else:
                        head_idx = idx // seqlen
                        m_idx = idx - head_idx * seqlen
                    mLSE_partial_cur_copy = mLSE_partial_copy[None, m_idx, None, head_idx]
                    for s in cutlass.range(cute.size(tLSEcLSE, mode=[1]), unroll_full=True):
                        si = tLSEcLSE[0, s, 0][0]  # Get split coordinate
                        if si < num_splits:
                            cute.copy(
                                gmem_thr_copy_LSE,
                                mLSE_partial_cur_copy[None, si],
                                tLSEsLSE[None, s, m],
                            )
                        else:
                            tLSEsLSE[None, s, m].fill(-Float32.inf)
                # Don't need to zero out the rest of the LSEs, as we will not write the output to gmem
            cute.arch.cp_async_commit_group()
            # ===============================
            # Step 2: Load O_partial for pipeline stages
            # ===============================
            gmem_thr_copy_O_partial = gmem_tiled_copy_O_partial.get_slice(tidx)
            cO = cute.make_identity_tensor((self.m_block_size, self.k_block_size))
            tOcO = gmem_thr_copy_O_partial.partition_D(cO)
            tOsO_partial = gmem_thr_copy_O_partial.partition_D(sO)
            if const_expr(cu_seqlens is None):
                mO_partial_cur = mO_partial[None, None, None, None, batch_idx]
            else:
                mO_partial_cur = cute.domain_offset((offset, 0, 0, 0), mO_partial)
            # Precompute these values to avoid recomputing them in the loop
            num_rows = const_expr(cute.size(tOcO, mode=[1]))
            tOmidx = cute.make_fragment(num_rows, cutlass.Int32)
            tOhidx = cute.make_fragment(num_rows, cutlass.Int32)
            tOrOptr = cute.make_fragment(num_rows, cutlass.Int64)
            for m in cutlass.range(num_rows, unroll_full=True):
                mi = tOcO[0, m, 0][0]  # m coordinate
                idx = m_block * self.m_block_size + mi
                if const_expr(not varlen):
                    tOhidx[m], tOmidx[m] = divmod(idx, seqlen_divmod)
                else:
                    tOhidx[m] = idx // seqlen
                    tOmidx[m] = idx - tOhidx[m] * seqlen
                tOrOptr[m] = utils.elem_pointer(
                    mO_partial_cur, (tOmidx[m], k_block * self.k_block_size, 0, tOhidx[m])
                ).toint()
                # tOhidx[m] == -1 marks out-of-range rows; checked in every later step.
                if idx >= max_idx:
                    tOhidx[m] = -1
            tOpO = cute.make_fragment(cute.size(tOcO, [2]), cutlass.Boolean)
            if const_expr(not self.is_even_k):
                for k in cutlass.range(cute.size(tOpO), unroll_full=True):
                    tOpO[k] = tOcO[0, 0, k][1] < mO_partial.shape[1] - k_block * self.k_block_size
            # if cute.arch.thread_idx()[0] == 0 and k_block == 1: cute.print_tensor(tOpO)
            load_O_partial = partial(
                self.load_O_partial,
                gmem_tiled_copy_O_partial,
                tOrOptr,
                tOsO_partial,
                tOhidx,
                tOpO,
                tOcO,
                mO_partial_cur.layout,
            )
            # Load first few stages of O_partial
            for stage in cutlass.range(self.stages - 1, unroll_full=True):
                if stage < num_splits:
                    load_O_partial(stage, stage)
                cute.arch.cp_async_commit_group()
            # ===============================
            # Step 3: Load and transpose LSE from smem to registers
            # ===============================
            # Wait for LSE and initial O partial stages to complete
            cute.arch.cp_async_wait_group(self.stages - 1)
            cute.arch.sync_threads()
            # if cute.arch.thread_idx()[0] == 0:
            #     # cute.print_tensor(sLSE)
            #     for i in range(64):
            #         cute.printf("sLSE[%d, 0] = %f", i, sLSE[i, 0])
            # cute.arch.sync_threads()
            s2r_thr_copy_LSE = s2r_tiled_copy_LSE.get_slice(tidx)
            ts2rsLSE = s2r_thr_copy_LSE.partition_S(sLSE)
            ts2rrLSE = cute.make_fragment_like(ts2rsLSE)
            cute.copy(s2r_tiled_copy_LSE, ts2rsLSE, ts2rrLSE)
            # ===============================
            # Step 4: Compute final LSE along split dimension
            # ===============================
            lse_sum = cute.make_fragment(cute.size(ts2rrLSE, mode=[2]), Float32)
            ts2rcLSE = s2r_thr_copy_LSE.partition_D(cLSE)
            # We compute the max valid split for each row to short-circuit the computation later
            max_valid_split = cute.make_fragment(cute.size(ts2rrLSE, mode=[2]), Int32)
            assert cute.size(ts2rrLSE, mode=[0]) == 1
            # Compute max, scales, and final LSE for each row
            for m in cutlass.range(cute.size(ts2rrLSE, mode=[2]), unroll_full=True):
                # Find max LSE value across splits
                threads_per_col = const_expr(self.smem_threads_per_col_lse)
                lse_max = cute.arch.warp_reduction_max(
                    ts2rrLSE[None, None, m]
                    .load()
                    .reduce(cute.ReductionOp.MAX, init_val=-Float32.inf, reduction_profile=0),
                    threads_in_group=threads_per_col,
                )
                # if cute.arch.thread_idx()[0] == 0: cute.printf(lse_max)
                # Find max valid split index
                max_valid_idx = -1
                for s in cutlass.range(cute.size(ts2rrLSE, mode=[1]), unroll_full=True):
                    if ts2rrLSE[0, s, m] != -Float32.inf:
                        max_valid_idx = ts2rcLSE[0, s, 0][0]  # Get split coordinate
                # if cute.arch.thread_idx()[0] < 32: cute.printf(max_valid_idx)
                max_valid_split[m] = cute.arch.warp_reduction_max(
                    max_valid_idx, threads_in_group=threads_per_col
                )
                # Compute exp scales and sum
                lse_max_cur = (
                    0.0 if lse_max == -Float32.inf else lse_max
                )  # In case all local LSEs are -inf
                LOG2_E = math.log2(math.e)
                lse_sum_cur = 0.0
                for s in cutlass.range(cute.size(ts2rrLSE, mode=[1]), unroll_full=True):
                    scale = cute.math.exp2(
                        ts2rrLSE[0, s, m] * LOG2_E - (lse_max_cur * LOG2_E), fastmath=True
                    )
                    lse_sum_cur += scale
                    ts2rrLSE[0, s, m] = scale  # Store scale for later use
                lse_sum_cur = cute.arch.warp_reduction_sum(
                    lse_sum_cur, threads_in_group=threads_per_col
                )
                lse_sum[m] = cute.math.log(lse_sum_cur, fastmath=True) + lse_max
                # Normalize scales (guarding against zero sum and NaN)
                inv_sum = (
                    0.0 if (lse_sum_cur == 0.0 or lse_sum_cur != lse_sum_cur) else 1.0 / lse_sum_cur
                )
                ts2rrLSE[None, None, m].store(ts2rrLSE[None, None, m].load() * inv_sum)
            # Store the scales exp(lse - lse_logsum) back to smem
            cute.copy(s2r_tiled_copy_LSE, ts2rrLSE, ts2rsLSE)
            # Store max valid split to smem
            for m in cutlass.range(cute.size(ts2rrLSE, mode=[2]), unroll_full=True):
                if ts2rcLSE[0, 0, m][0] == 0:  # Only thread responsible for s=0 writes
                    mi = ts2rcLSE[0, 0, m][1]
                    if mi < self.m_block_size:
                        sMaxValidSplit[mi] = max_valid_split[m]
            # ===============================
            # Step 5: Store final LSE to gmem
            # ===============================
            if const_expr(mLSE is not None):
                if const_expr(cu_seqlens is None):
                    mLSE_cur = mLSE[None, None, batch_idx]
                else:
                    mLSE_cur = cute.domain_offset((offset, 0), mLSE)
                if k_block == 0:  # Only first k_block writes LSE when mLSE is provided
                    for m in cutlass.range(cute.size(ts2rrLSE, mode=[2]), unroll_full=True):
                        if ts2rcLSE[0, 0, m][0] == 0:  # Only thread responsible for s=0 writes
                            mi = ts2rcLSE[0, 0, m][1]
                            idx = m_block * self.m_block_size + mi
                            if idx < max_idx:
                                if const_expr(not varlen):
                                    head_idx, m_idx = divmod(idx, seqlen_divmod)
                                else:
                                    head_idx = idx // seqlen
                                    m_idx = idx - head_idx * seqlen
                                mLSE_cur[m_idx, head_idx] = lse_sum[m]
            # ===============================
            # Step 6: Read O_partial and accumulate final O
            # ===============================
            cute.arch.sync_threads()
            # Get max valid split for this thread
            thr_max_valid_split = sMaxValidSplit[tOcO[0, 0, 0][0]]
            for m in cutlass.range(1, cute.size(tOcO, mode=[1])):
                thr_max_valid_split = max(thr_max_valid_split, sMaxValidSplit[tOcO[0, m, 0][0]])
            tOrO_partial = cute.make_fragment_like(tOsO_partial[None, None, None, 0])
            tOrO = cute.make_fragment_like(tOrO_partial, Float32)
            tOrO.fill(0.0)
            stage_load = self.stages - 1
            stage_compute = 0
            # Main accumulation loop
            for s in cutlass.range(thr_max_valid_split + 1, unroll=4):
                # Get scales for this split
                scale = cute.make_fragment(num_rows, Float32)
                for m in cutlass.range(num_rows, unroll_full=True):
                    scale[m] = sLSE[s, tOcO[0, m, 0][0]]  # Get scale from smem
                # Load next stage if needed
                split_to_load = s + self.stages - 1
                if split_to_load <= thr_max_valid_split:
                    load_O_partial(split_to_load, stage_load)
                cute.arch.cp_async_commit_group()
                stage_load = 0 if stage_load == self.stages - 1 else stage_load + 1
                # Wait for the current stage to be ready
                cute.arch.cp_async_wait_group(self.stages - 1)
                # We don't need __syncthreads() because each thread is just reading its own data from smem
                # Copy from smem to registers
                cute.autovec_copy(tOsO_partial[None, None, None, stage_compute], tOrO_partial)
                stage_compute = 0 if stage_compute == self.stages - 1 else stage_compute + 1
                # Accumulate scaled partial results
                for m in cutlass.range(num_rows, unroll_full=True):
                    if tOhidx[m] >= 0 and scale[m] > 0.0:
                        tOrO[None, m, None].store(
                            tOrO[None, m, None].load()
                            + scale[m] * tOrO_partial[None, m, None].load().to(Float32)
                        )
            # ===============================
            # Step 7: Write final O to gmem
            # ===============================
            rO = cute.make_fragment_like(tOrO, self.dtype)
            rO.store(tOrO.load().to(self.dtype))
            if const_expr(cu_seqlens is None):
                mO_cur = mO[None, None, None, batch_idx]
            else:
                mO_cur = cute.domain_offset((offset, 0, 0), mO)
            mO_cur = utils.domain_offset_aligned((0, k_block * self.k_block_size, 0), mO_cur)
            elems_per_store = const_expr(cute.size(gmem_tiled_copy_O.layout_tv_tiled[1]))
            # mO_cur_copy = cute.tiled_divide(mO_cur, (1, elems_per_store,))
            gmem_thr_copy_O = gmem_tiled_copy_O.get_slice(tidx)
            # Write final results
            for m in cutlass.range(num_rows, unroll_full=True):
                if tOhidx[m] >= 0:
                    mO_cur_copy = cute.tiled_divide(
                        mO_cur[tOmidx[m], None, tOhidx[m]], (elems_per_store,)
                    )
                    for k in cutlass.range(cute.size(tOcO, mode=[2]), unroll_full=True):
                        k_idx = tOcO[0, 0, k][1] // elems_per_store
                        if const_expr(self.is_even_k) or tOpO[k]:
                            cute.copy(gmem_thr_copy_O, rO[None, m, k], mO_cur_copy[None, k_idx])
    @cute.jit
    def load_O_partial(
        self,
        gmem_tiled_copy_O_partial: cute.TiledCopy,
        tOrOptr: cute.Tensor,
        tOsO_partial: cute.Tensor,
        tOhidx: cute.Tensor,
        tOpO: cute.Tensor,
        tOcO: cute.Tensor,
        mO_cur_partial_layout: cute.Layout,
        split: Int32,
        stage: Int32,
    ) -> None:
        """Copy one split of the partial-O accumulator from gmem into pipeline
        stage ``stage`` of ``tOsO_partial``.

        Rows with ``tOhidx[m] < 0`` are skipped (invalid rows); column chunks
        failing the predicate ``tOpO[k]`` are skipped unless ``self.is_even_k``.
        ``tOrOptr[m]`` holds a precomputed per-row gmem address — presumably
        filled by the combine loop that calls this; confirm against the caller,
        which also commits the resulting cp.async group.
        """
        # Number of contiguous elements moved per copy instruction.
        elems_per_load = const_expr(cute.size(gmem_tiled_copy_O_partial.layout_tv_tiled[1]))
        # Destination: the selected pipeline stage of the staging buffer
        # (smem, judging by the sO naming and the cp_async_wait + autovec_copy
        # consumption in the caller — confirm).
        tOsO_partial_cur = tOsO_partial[None, None, None, stage]
        for m in cutlass.range(cute.size(tOcO, [1]), unroll_full=True):
            if tOhidx[m] >= 0:
                # Rebuild a gmem tensor from the raw per-row pointer so the row
                # offset does not have to be recomputed from the full layout.
                o_gmem_ptr = cute.make_ptr(
                    tOsO_partial.element_type, tOrOptr[m], cute.AddressSpace.gmem, assumed_align=16
                )
                mO_partial_cur = cute.make_tensor(
                    o_gmem_ptr, cute.slice_(mO_cur_partial_layout, (0, None, None, 0))
                )
                # Group the row into elems_per_load-wide chunks to match the tiled copy.
                mO_partial_cur_copy = cute.tiled_divide(mO_partial_cur, (elems_per_load,))
                for k in cutlass.range(cute.size(tOcO, mode=[2]), unroll_full=True):
                    # Column-chunk index within the grouped row.
                    k_idx = tOcO[0, 0, k][1] // elems_per_load
                    if const_expr(self.is_even_k) or tOpO[k]:
                        cute.copy(
                            gmem_tiled_copy_O_partial,
                            mO_partial_cur_copy[None, k_idx, split],
                            tOsO_partial_cur[None, m, k],
                        )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_fwd_combine.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 627,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:benchmarks/benchmark_attn.py | from collections import namedtuple
from functools import partial
import math
import os
from typing import NamedTuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
try:
import cudnn
except ImportError:
cudnn = None
# cudnn = None
Timing = NamedTuple('timing', [('mean', float)])
from einops import rearrange, repeat
# from flash_attn.utils.benchmark import benchmark_forward, benchmark_backward, benchmark_combined, benchmark_all, benchmark_fwd_bwd, pytorch_profiler
from flash_attn.cute.benchmark import benchmark_forward, benchmark_backward, benchmark_combined, benchmark_all, benchmark_fwd_bwd, pytorch_profiler
try:
from flash_attn.flash_attn_interface import flash_attn_func, flash_attn_varlen_func
except ImportError:
flash_attn_func = None
flash_attn_varlen_func = None
from flash_attn.cute.interface import flash_attn_func as flash_attn_func_python
from flash_attn.cute.interface import flash_attn_varlen_func as flash_attn_varlen_func_python
try:
from flash_attn_interface import flash_attn_func as flash_attn_func_v3
from flash_attn_interface import flash_attn_varlen_func as flash_attn_varlen_func_v3
except ImportError:
flash_attn_func_v3 = None
flash_attn_varlen_func_v3 = None
if torch.cuda.get_device_capability()[0] != 9:
flash_attn_func_v3 = None
# flash_attn_func_v3 = None
flash_attn_func = None
# flash_attn_func_python = None
from triton.testing import do_bench
# Cache of boolean "disallowed" causal masks, keyed by (seqlen_q, seqlen_k),
# so repeated benchmark iterations do not rebuild the mask each call.
attention_ref_mask_cache = {}
def attention_ref(q, k, v, causal=False):
    """Reference (quadratic-memory) attention in plain PyTorch.

    Args:
        q: (batch, seqlen_q, nheads, headdim) queries.
        k: (batch, seqlen_k, nheads, headdim) keys.
        v: (batch, seqlen_k, nheads, headdim_v) values.
        causal: if True, query position t attends only to key positions s <= t
            (top-left aligned; this benchmark uses seqlen_q == seqlen_k).

    Returns:
        (batch, seqlen_q, nheads, headdim_v) attention output.
    """
    softmax_scale = 1.0 / math.sqrt(q.shape[-1])
    scores = torch.einsum('bthd,bshd->bhts', q * softmax_scale, k)
    if causal:
        # Key the cache on both sequence lengths: the mask shape depends on
        # (seqlen_q, seqlen_k), not just seqlen_q.
        key = tuple(scores.shape[-2:])
        if key not in attention_ref_mask_cache:
            # True where attention is NOT allowed (s > t): the strict upper
            # triangle. The previous version cached the *allowed* (tril)
            # region and then masked_fill'ed it, which inverted the mask and
            # -inf'ed exactly the positions causal attention should keep.
            mask = torch.triu(torch.ones(scores.shape[-2:], device=scores.device, dtype=torch.bool), diagonal=1)
            attention_ref_mask_cache[key] = mask
        else:
            mask = attention_ref_mask_cache[key]
        scores = scores.masked_fill(mask, float('-inf'))
    attn = torch.softmax(scores, dim=-1)
    return torch.einsum('bhts,bshd->bthd', attn, v)
attention_ref = None # Disable the benchmarking for now
def time_fwd(func, *args, repeats=30, verbose=True, desc="", **kwargs):
    """Benchmark ``func(*args, **kwargs)`` and return its mean runtime.

    Uses triton's ``do_bench`` (which reports milliseconds) and converts to
    seconds, wrapped in the module-level ``Timing`` namedtuple so callers can
    read ``.mean``. ``verbose`` and ``desc`` are accepted only for interface
    compatibility with the commented-out ``benchmark_forward`` / CUDA-graph
    variants below; the active implementation ignores them.
    """
    # # Warmup
    # for _ in range(5):
    #     func(*args, **kwargs)
    # time.sleep(1)
    # return benchmark_forward(func, *args, **kwargs, repeats=repeats, verbose=verbose, desc=desc)[1]
    # s = torch.cuda.Stream()
    # s.wait_stream(torch.cuda.current_stream())
    # with torch.cuda.stream(s):
    #     for _ in range(2):
    #         out = func(*args, **kwargs)
    # torch.cuda.current_stream().wait_stream(s)
    # graph = torch.cuda.CUDAGraph()
    # with torch.cuda.graph(graph):
    #     out = func(*args, **kwargs)
    # time_f = benchmark_forward(lambda: graph.replay(), repeats=repeats, verbose=verbose, desc=desc)
    # # return time_f[1].mean
    # return time_f[1]
    return Timing(do_bench(lambda: func(*args, **kwargs), warmup=5, rep=repeats) * 1e-3)
def flops(batch, nheads, seqlen_q, seqlen_k, headdim, headdim_v, causal=False, window_size=(None, None)):
    """Matmul FLOP count for one attention forward pass.

    Counts the 2*M*N*K FLOPs of the two GEMMs (Q@K^T over ``headdim`` and
    P@V over ``headdim_v``), using the average number of key positions each
    query row actually attends to under causal / sliding-window masking.

    Args:
        batch, nheads: batch size and number of query heads.
        seqlen_q, seqlen_k: query / key sequence lengths.
        headdim, headdim_v: QK head dimension and V head dimension.
        causal: bottom-right-aligned causal masking (ignores ``window_size``).
        window_size: (left, right) window sizes; ``None`` means unbounded on
            that side.

    Returns:
        Total FLOPs (float when an averaged mask is involved).
    """
    if causal:
        # Bottom-right aligned causal: rows attend to between
        # max(0, seqlen_k - seqlen_q) and seqlen_k keys; take the midpoint.
        avg_seqlen = (max(0, seqlen_k - seqlen_q) + seqlen_k) / 2
    elif window_size == (None, None):
        avg_seqlen = seqlen_k
    else:
        # Average attended-key count per query row, computed in plain Python
        # so this pure-arithmetic helper does not require a CUDA device (the
        # previous version allocated torch tensors on 'cuda' just to average).
        left, right = window_size
        shift = seqlen_k - seqlen_q  # bottom-right alignment offset
        total = 0
        for row in range(seqlen_q):
            col_left = max(row + shift - left, 0) if left is not None else 0
            col_right = min(row + shift + right, seqlen_k - 1) if right is not None else seqlen_k - 1
            total += col_right - col_left + 1
        avg_seqlen = total / seqlen_q
    return batch * nheads * 2 * seqlen_q * avg_seqlen * (headdim + headdim_v)
def convert_to_cudnn_type(torch_type):
    """Map a torch dtype to the matching ``cudnn.data_type`` enum member.

    Raises:
        ValueError: if ``torch_type`` has no cuDNN equivalent.
    """
    # Resolve the enum attribute *name* first so an unsupported dtype raises
    # ValueError without ever touching the (possibly absent) cudnn module.
    attr_name = {
        torch.float16: "HALF",
        torch.bfloat16: "BFLOAT16",
        torch.float32: "FLOAT",
        torch.int32: "INT32",
        torch.int64: "INT64",
    }.get(torch_type)
    if attr_name is None:
        raise ValueError("Unsupported tensor data type.")
    return getattr(cudnn.data_type, attr_name)
def cudnn_spda_setup(q, k, v, causal=False, window_size_left=None):
    """Build a cuDNN SDPA forward graph bound to the given tensors.

    Args:
        q, k, v: (batch, nheads, seqlen, headdim)-layout tensors (head dim
            before sequence dim, unlike the flash-attn interface).
        causal: apply a causal mask.
        window_size_left: left sliding-window size; when set it also forces
            the causal-mask flag on (see the sdpa call below).

    Returns:
        A callable that executes the pre-built graph and returns the output
        tensor; the same preallocated buffer is returned on every call.
    """
    b, nheads, seqlen_q, headdim = q.shape
    _, nheads_k, seqlen_k, _ = k.shape
    headdim_v = v.shape[-1]
    assert v.shape == (b, nheads_k, seqlen_k, headdim_v)
    assert cudnn is not None, 'CUDNN is not available'
    q_gpu, k_gpu, v_gpu = q, k, v
    # Preallocated output and softmax-stats buffers, reused across calls.
    o_gpu = torch.empty((b, nheads, seqlen_q, headdim_v), dtype=q.dtype, device=q.device)
    stats_gpu = torch.empty(b, nheads, seqlen_q, 1, dtype=torch.float32, device=q.device)
    graph = cudnn.pygraph(
        io_data_type=convert_to_cudnn_type(q.dtype),
        intermediate_data_type=cudnn.data_type.FLOAT,
        compute_data_type=cudnn.data_type.FLOAT,
    )
    # Graph-side tensor handles mirroring the real GPU tensors (note: the
    # Python names q/k/v are rebound to graph handles from here on).
    q = graph.tensor_like(q_gpu.detach())
    k = graph.tensor_like(k_gpu.detach())
    v = graph.tensor_like(v_gpu.detach())
    o, stats = graph.sdpa(
        name="sdpa",
        q=q,
        k=k,
        v=v,
        is_inference=False,
        attn_scale=1.0 / math.sqrt(headdim),
        # use_causal_mask_bottom_right=causal or window_size_left is not None,
        use_causal_mask=causal or window_size_left is not None,
        sliding_window_length=window_size_left if window_size_left is not None and not causal else None,
    )
    o.set_output(True).set_dim(o_gpu.shape).set_stride(o_gpu.stride())
    stats.set_output(True).set_data_type(cudnn.data_type.FLOAT)
    # cuDNN frontend build pipeline: validate -> build graph -> plan -> compile.
    graph.validate()
    graph.build_operation_graph()
    graph.create_execution_plans([cudnn.heur_mode.A, cudnn.heur_mode.FALLBACK])
    graph.check_support()
    graph.build_plans()
    # Map graph handles to the concrete device buffers used at execution time.
    variant_pack = {
        q: q_gpu,
        k: k_gpu,
        v: v_gpu,
        o: o_gpu,
        stats: stats_gpu,
    }
    workspace = torch.empty(graph.get_workspace_size(), device="cuda", dtype=torch.uint8)
    def run(*args, **kwargs):
        # Extra args are ignored so this matches the time_fwd(func, ...) convention.
        graph.execute(variant_pack, workspace)
        return o_gpu
    return run
def cudnn_spda_bwd_setup(q, k, v, o, g, lse, causal=False, window_size_left=None):
    """Build a cuDNN SDPA backward graph bound to the given tensors.

    Args:
        q, k, v: (batch, nheads, seqlen, headdim)-layout input tensors.
        o: forward output, (b, nheads, seqlen_q, headdim_v).
        g: upstream gradient dO, same shape as ``o``.
        lse: forward softmax stats, (b, nheads, seqlen_q, 1), fp32.
        causal, window_size_left: same masking options as ``cudnn_spda_setup``.

    Returns:
        A callable executing the pre-built graph and returning (dq, dk, dv);
        the same preallocated gradient buffers are reused on every call.
    """
    b, nheads, seqlen_q, headdim = q.shape
    _, nheads_k, seqlen_k, _ = k.shape
    headdim_v = v.shape[-1]
    assert v.shape == (b, nheads_k, seqlen_k, headdim_v)
    assert g.shape == (b, nheads, seqlen_q, headdim_v)
    assert o.shape == (b, nheads, seqlen_q, headdim_v)
    assert lse.shape == (b, nheads, seqlen_q, 1)
    assert cudnn is not None, 'CUDNN is not available'
    q_gpu, k_gpu, v_gpu, o_gpu, g_gpu = q, k, v, o, g
    # Preallocated gradient buffers, reused across calls.
    dq_gpu = torch.empty_like(q_gpu)
    dk_gpu = torch.empty_like(k_gpu)
    dv_gpu = torch.empty_like(v_gpu)
    graph = cudnn.pygraph(
        io_data_type=convert_to_cudnn_type(q.dtype),
        intermediate_data_type=cudnn.data_type.FLOAT,
        compute_data_type=cudnn.data_type.FLOAT,
    )
    # Graph-side handles mirroring the real GPU tensors (Python names rebound).
    q = graph.tensor_like(q_gpu.detach())
    k = graph.tensor_like(k_gpu.detach())
    v = graph.tensor_like(v_gpu.detach())
    o = graph.tensor_like(o_gpu.detach())
    g = graph.tensor_like(g_gpu.detach())
    stats = graph.tensor_like(lse.detach())
    dq, dk, dv = graph.sdpa_backward(
        name="sdpa_backward",
        q=q,
        k=k,
        v=v,
        o=o,
        dO=g,
        stats=stats,
        attn_scale=1.0 / math.sqrt(headdim),
        # use_causal_mask_bottom_right=causal or window_size_left is not None,
        use_causal_mask=causal or window_size_left is not None,
        sliding_window_length=window_size_left if window_size_left is not None and not causal else None,
        use_deterministic_algorithm=False,
    )
    dq.set_output(True).set_dim(dq_gpu.shape).set_stride(dq_gpu.stride())
    dk.set_output(True).set_dim(dk_gpu.shape).set_stride(dk_gpu.stride())
    dv.set_output(True).set_dim(dv_gpu.shape).set_stride(dv_gpu.stride())
    # cuDNN frontend build pipeline: validate -> build graph -> plan -> compile.
    graph.validate()
    graph.build_operation_graph()
    graph.create_execution_plans([cudnn.heur_mode.A, cudnn.heur_mode.FALLBACK])
    graph.check_support()
    graph.build_plans()
    # Map graph handles to the concrete device buffers used at execution time.
    variant_pack = {
        q: q_gpu,
        k: k_gpu,
        v: v_gpu,
        o: o_gpu,
        g: g_gpu,
        stats: lse,
        dq: dq_gpu,
        dk: dk_gpu,
        dv: dv_gpu,
    }
    workspace = torch.empty(graph.get_workspace_size(), device="cuda", dtype=torch.uint8)
    def run(*args, **kwargs):
        # Extra args are ignored so this matches the time_fwd(func, ...) convention.
        graph.execute(variant_pack, workspace)
        return dq_gpu, dk_gpu, dv_gpu
    return run
# ---------------------------------------------------------------------------
# Benchmark configuration (module-level script state). The commented-out lines
# throughout this section are alternative presets the authors toggle by hand.
# ---------------------------------------------------------------------------
torch.manual_seed(0)
repeats = 10
dropout_p = 0.0
causal = False
dtype = torch.bfloat16
# dtype = torch.float8_e4m3fn
# fp8 inputs are generated in bf16 first, then cast to fp8 below.
dtype_gen = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
device = 'cuda'
verbose = True
varlen = False
has_backward = True
page_size = None
# page_size = 128
softcap = 0.0
V_colmajor = False
deterministic = False
batch_size = 2
# seqlen = 2048
seqlen = 8192
# seqlen = 4096
# seqlen = 2047
dim = 2048
# headdim = 128
# headdim = 64
headdim = 256
# (batch_size, seqlen) pairs to sweep; chosen so total tokens stay roughly constant.
# for headdim in [64, 128, 256]:
# bs_seqlen_vals = [(32, 512), (16, 1024), (8, 2048), (4, 4096), (2, 8192), (1, 16384)]
# bs_seqlen_vals = [(32, 1024), (16, 2048), (8, 4096), (4, 8192), (2, 16384), (1, 32768)]
# bs_seqlen_vals = [(32, 512), (16, 1024)]
# bs_seqlen_vals = [(2, 64 * 132)]
bs_seqlen_vals = [(4, 8192)]
# bs_seqlen_vals = [(1, 16 * 1024)]
# Results keyed by ((causal, headdim, batch_size, seqlen), impl_name) -> mean seconds.
time_f = {}
time_b = {}
# for headdim in [64, 128, 256]:
# for headdim in [64, 96, 128, 192]:
# for headdim in [64, 96, 128, 192, 256]:
# for headdim in [64, 96, 128]:
# for headdim in [64, 128, 256]:
# for headdim in [64, 96, 128, 192, 256]:
# for headdim in [64, 128, 192]:
# for headdim in [192]:
# Main benchmark sweep: head dim -> (batch, seqlen) -> causal on/off.
for headdim in [128]:
    # nheads = dim // headdim
    # Fewer heads for larger head dims, to keep total work comparable.
    nheads = 32 if headdim <= 64 else 16 if headdim <= 192 else 8
    # nheads = 128
    # headdim = 64
    # batch_size = 64
    # seqlen = 512
    # nheads = 8
    # headdim = 128
    nheads_kv = nheads
    # nheads_kv = nheads // 8
    # nheads_kv = 1
    # headdim_v = headdim
    headdim_v = 128 if headdim == 192 else headdim
    # headdim_v = 512
    has_qv = headdim == 64 and headdim_v == 512
    # has_qv = False
    # sinks = torch.randn(nheads, dtype=torch.bfloat16, device=device)
    sinks = None
    for batch_size, seqlen in bs_seqlen_vals:
        num_splits = 0
        # window_size = (-1, -1)
        window_size = (None, None)
        window_size_fa = (-1, -1)
        # window_size = (seqlen // 2 - 1, 0)
        pack_gqa = None
        # seqlen_q = 64
        seqlen_q = seqlen
        leftpad_k = None
        # leftpad_k = torch.full((batch_size,), 0, device=device, dtype=torch.int32)
        # Generate inputs in dtype_gen, then cast to the benchmark dtype
        # (matters for fp8, which cannot be sampled directly).
        q = torch.randn(batch_size, seqlen_q, nheads, headdim, device=device, dtype=dtype_gen, requires_grad=has_backward)
        k = torch.randn(batch_size, seqlen, nheads_kv, headdim, device=device, dtype=dtype_gen, requires_grad=has_backward)
        v = torch.randn(batch_size, seqlen, nheads_kv, headdim_v, device=device, dtype=dtype_gen, requires_grad=has_backward)
        q, k, v = [x.detach().to(dtype).requires_grad_(has_backward) for x in [q, k, v]]
        v_colmajor = v.detach().transpose(-1, -3).contiguous().transpose(-1, -3).requires_grad_(has_backward)
        v_fa3 = v if not V_colmajor else v_colmajor
        qv = torch.randn(batch_size, seqlen_q, nheads, headdim_v, device=device, dtype=dtype_gen) if has_qv else None
        # q = torch.randint(-2, 3, (batch_size, seqlen, nheads, headdim), device=device, dtype=torch.int32).to(dtype)
        # k = torch.randint(-2, 3, (batch_size, seqlen, nheads, headdim), device=device, dtype=torch.int32).to(dtype)
        # v = torch.randint(-2, 3, (batch_size, seqlen, nheads, headdim_v), device=device, dtype=torch.int32).to(dtype)
        # g: upstream gradient; o/stats: fake fwd outputs fed to the cuDNN bwd graph.
        g = torch.randn(batch_size, seqlen_q, nheads, headdim_v, device=device, dtype=dtype_gen)
        o = torch.randn(batch_size, seqlen_q, nheads, headdim_v, device=device, dtype=dtype_gen)
        stats = torch.randn(batch_size, seqlen_q, nheads, 1, device=device, dtype=torch.float32)
        if varlen:
            # Unpadded (total_tokens, h, d) views plus cumulative-seqlen tables.
            q_unpad, k_unpad, v_unpad = [rearrange(x.detach(), "b s h d -> (b s) h d").requires_grad_(has_backward) for x in [q, k, v]]
            cu_seqlens_q = torch.arange(batch_size + 1, device=device, dtype=torch.int32) * seqlen_q
            cu_seqlens_k = torch.arange(batch_size + 1, device=device, dtype=torch.int32) * seqlen if page_size is None else None
            # cu_seqlens_q = torch.tensor([0, 248, 249, 250, 251, 252, 253, 254, 255, 256], device=device, dtype=torch.int32)
            # q_unpad = q_unpad[:256]
            # seqlen_q = 256
            # cu_seqlens_q = torch.tensor([0, 376, 377, 378, 379, 380, 381, 382, 383, 384], device=device, dtype=torch.int32)
            # q_unpad = q_unpad[:384]
            # seqlen_q = 384
        if page_size is not None:
            # Paged KV cache: reshape K/V into pages and build an identity page table.
            assert seqlen % page_size == 0
            k_paged, v_paged = [rearrange(x, "b (n p) h d -> (b n) p h d", p=page_size) for x in [k, v]]
            page_table = rearrange(torch.arange(batch_size * seqlen // page_size, device=device, dtype=torch.int32),
                                   "(b s) -> b s", s=seqlen // page_size)
        else:
            page_table = None
        for causal in [False, True]:
        # for causal in [True]:
            print(f"\n### {headdim = }, {causal = }, {seqlen = }, {batch_size = }, {nheads = }, {nheads_kv = }, {varlen = }, {deterministic = } ###")
            nFLOPS = flops(batch_size, nheads, seqlen_q, seqlen, headdim if not has_qv else headdim + headdim_v, headdim_v, causal=causal, window_size=window_size)
            # Build the cuDNN graphs once per config (setup cost excluded from timing).
            if cudnn is not None:
            # if False:
                if headdim <= 256 and dtype != torch.float8_e4m3fn:
                    cudnn_spda = cudnn_spda_setup(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), causal=causal, window_size_left=window_size[0])
                    if has_backward:
                        cudnn_spda_bwd = cudnn_spda_bwd_setup(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), o.transpose(1, 2), g.transpose(1, 2), stats.transpose(1, 2), causal=causal, window_size_left=window_size[0])
            # Naive PyTorch reference (usually disabled via attention_ref = None above).
            if dtype != torch.float8_e4m3fn and attention_ref is not None:
                ms = time_fwd(attention_ref, q, k, v, causal=causal, repeats=repeats, verbose=verbose, desc='Standard')
                time_f[(causal, headdim, batch_size, seqlen), "Standard"] = ms.mean
                if has_backward:
                    time.sleep(1)
                    _, msb = benchmark_backward(attention_ref, q, k, v, causal=causal, repeats=repeats, verbose=False, desc='Standard')
                    time_b[(causal, headdim, batch_size, seqlen), "Standard"] = msb.mean
            # FlashAttention-2.
            if dtype != torch.float8_e4m3fn and headdim == headdim_v and flash_attn_func is not None:
                if not varlen:
                    m0 = time_fwd(flash_attn_func, q, k, v, dropout_p, causal=causal, window_size=window_size_fa, softcap=softcap, repeats=repeats, verbose=verbose, desc='Fav2')
                else:
                    m0 = time_fwd(flash_attn_varlen_func, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen, dropout_p, causal=causal, window_size=window_size_fa, softcap=softcap, repeats=repeats, verbose=verbose, desc='Fav2')
                time_f[(causal, headdim, batch_size, seqlen), "Flash2"] = m0.mean
                if has_backward:
                    time.sleep(1)
                    if not varlen:
                        _, m0b = benchmark_backward(flash_attn_func, q, k, v, dropout_p, causal=causal, window_size=window_size_fa, softcap=softcap, deterministic=deterministic,
                                                    repeats=repeats, verbose=False, desc='Fav2')
                    else:
                        _, m0b = benchmark_backward(flash_attn_varlen_func, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen, dropout_p, causal=causal, window_size=window_size_fa, softcap=softcap, deterministic=deterministic,
                                                    repeats=repeats, verbose=False, desc='Fav2')
                time_b[(causal, headdim, batch_size, seqlen), "Flash2"] = m0b.mean
            # pytorch_profiler(flash_attn_func, q, k, v, dropout_p, causal=causal, backward=True)
            # cuDNN SDPA (graphs pre-built above; the bwd graph is timed with time_fwd
            # since the run() closure is a plain callable).
            if cudnn is not None:
            # if False:
                if headdim <= 256 and dtype != torch.float8_e4m3fn:
                    time.sleep(1)  # Sleep to avoid residual power throttling from the previous benchmark
                    m2 = time_fwd(cudnn_spda, repeats=repeats, verbose=verbose, desc='CuDNN')
                    time_f[(causal, headdim, batch_size, seqlen), "cuDNN"] = m2.mean
                    if has_backward:
                        time.sleep(1)
                        m2b = time_fwd(cudnn_spda_bwd, repeats=repeats, verbose=verbose, desc='CuDNN')
                        time_b[(causal, headdim, batch_size, seqlen), "cuDNN"] = m2b.mean
                    # pytorch_profiler(cudnn_spda, backward=False)
                    # pytorch_profiler(cudnn_spda_bwd, backward=False)
            time.sleep(1)
            # FlashAttention-3 (C++ extension).
            if flash_attn_func_v3 is not None:
                if not varlen:
                    # m1 = time_fwd(flash_attn_func_v3, q, k if page_size is None else k_paged, v_fa3 if page_size is None else v_paged, cache_leftpad = leftpad_k, page_table=page_table, causal=causal, window_size=window_size, softcap=softcap, num_splits=num_splits, pack_gqa=pack_gqa, repeats=repeats, verbose=verbose, desc='Fav3')
                    m1 = time_fwd(flash_attn_func_v3, q, k if page_size is None else k_paged, v_fa3 if page_size is None else v_paged, causal=causal, window_size=window_size_fa, softcap=softcap, num_splits=num_splits, pack_gqa=pack_gqa, repeats=repeats, verbose=verbose, desc='Fav3')
                    # pytorch_profiler(flash_attn_func_v3, q, k if page_size is None else k_paged, v_fa3 if page_size is None else v_paged, page_table=page_table, causal=causal, window_size=window_size, softcap=softcap, num_splits=num_splits, pack_gqa=pack_gqa)
                else:
                    m1 = time_fwd(flash_attn_varlen_func_v3, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen, causal=causal, window_size=window_size_fa, softcap=softcap, num_splits=num_splits, pack_gqa=pack_gqa, repeats=repeats, verbose=verbose, desc='Fav3')
                    # pytorch_profiler(flash_attn_varlen_func_v3, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen, causal=causal, window_size=window_size, softcap=softcap, num_splits=num_splits)
                time_f[(causal, headdim, batch_size, seqlen), "Flash3"] = m1.mean
            # Pure-Python cute-DSL implementation.
            if flash_attn_func_python is not None:
                if not varlen:
                    m1_py = time_fwd(flash_attn_func_python, q, k if page_size is None else k_paged, v_fa3 if page_size is None else v_paged, causal=causal, window_size=window_size, learnable_sink=sinks, softcap=softcap, pack_gqa=pack_gqa, repeats=repeats, verbose=verbose, desc='Fav3 python')
                else:
                    m1_py = time_fwd(flash_attn_varlen_func_python, q_unpad, k_unpad if page_size is None else k_paged, v_unpad if page_size is None else v_paged, cu_seqlens_q, cu_seqlens_k, page_table=page_table, causal=causal, window_size=window_size, softcap=softcap, pack_gqa=pack_gqa, repeats=repeats, verbose=verbose, desc='Fav3 python')
            if dtype != torch.float8_e4m3fn and headdim == headdim_v and flash_attn_func_v3 is not None and has_backward:
                time.sleep(1)
                if not varlen:
                    _, m1b = benchmark_backward(flash_attn_func_v3, q, k, v, causal=causal, softcap=softcap, repeats=repeats, verbose=False, desc='Fav3')
                else:
                    _, m1b = benchmark_backward(flash_attn_varlen_func_v3, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen, causal=causal, window_size=window_size, softcap=softcap, deterministic=deterministic,
                                                repeats=repeats, verbose=False, desc='Fav3')
                time_b[(causal, headdim, batch_size, seqlen), "Flash3"] = m1b.mean
                time.sleep(1)
                # if not varlen:
                #     pytorch_profiler(flash_attn_func_v3, q, k, v, causal=causal, deterministic=deterministic, backward=True)
                # else:
                #     pytorch_profiler(flash_attn_varlen_func_v3, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen, causal=causal, deterministic=deterministic, backward=True)
            # benchmark_forward(torch.clone, k, repeats=repeats, verbose=verbose, desc='Memcpy')
            if dtype != torch.float8_e4m3fn and flash_attn_func_python is not None and has_backward:
                if not varlen:
                    _, m1b_py = benchmark_backward(flash_attn_func_python, q, k, v, causal=causal, softcap=softcap, deterministic=deterministic, repeats=repeats, verbose=False, desc='Fav4 python')
                else:
                    _, m1b_py = benchmark_backward(flash_attn_varlen_func_python, q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, causal=causal, softcap=softcap, deterministic=deterministic, repeats=repeats, verbose=False, desc='Fav4 python')
            # Summary: convert mean seconds to ms and TFLOPS (bwd uses the 2.5x
            # FLOP factor for the three backward GEMMs).
            if dtype != torch.float8_e4m3fn and attention_ref is not None:
                print(f'Standard fwd: {ms.mean * 1e3:.3f}ms, {(nFLOPS / ms.mean * 1e-12):.1f} TFLOPS')
                if has_backward:
                    print(f'Standard bwd: {msb.mean * 1e3:.3f}ms, {(2.5 * nFLOPS / msb.mean * 1e-12):.1f} TFLOPS')
            if dtype != torch.float8_e4m3fn and headdim == headdim_v and flash_attn_func is not None:
            # if False:
                print(f'FAv2 fwd: {m0.mean * 1e3:.3f}ms, {(nFLOPS / m0.mean * 1e-12):.1f} TFLOPS')
                if has_backward:
                    print(f'FAv2 bwd: {m0b.mean * 1e3:.3f}ms, {(2.5 * nFLOPS / m0b.mean * 1e-12):.1f} TFLOPS')
            if cudnn is not None:
                print(f'CuDNN fwd: {m2.mean * 1e3:.3f}ms, {(nFLOPS / m2.mean * 1e-12):.1f} TFLOPS')
                if has_backward:
                    print(f'CuDNN bwd: {m2b.mean * 1e3:.3f}ms, {(2.5 * nFLOPS / m2b.mean * 1e-12):.1f} TFLOPS')
            if flash_attn_func_v3 is not None:
                print(f'FAv3 fwd: {m1.mean * 1e3:.3f}ms, {(nFLOPS / m1.mean * 1e-12):.1f} TFLOPS')
                if dtype != torch.float8_e4m3fn and headdim == headdim_v and has_backward:
                    print(f'FAv3 bwd: {m1b.mean * 1e3:.3f}ms, {(2.5 * nFLOPS / m1b.mean * 1e-12):.1f} TFLOPS')
            if flash_attn_func_python is not None:
                print(f'FA Python fwd: {m1_py.mean * 1e3:.3f}ms, {(nFLOPS / m1_py.mean * 1e-12):.1f} TFLOPS')
                if dtype != torch.float8_e4m3fn and has_backward:
                    print(f'FA Python bwd: {m1b_py.mean * 1e3:.3f}ms, {(2.5 * nFLOPS / m1b_py.mean * 1e-12):.1f} TFLOPS')
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "benchmarks/benchmark_attn.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/fast_math.py | # Copyright (c) 2025, Tri Dao.
import cutlass
import cutlass.cute as cute
from cutlass import Int32
@cute.jit
def clz(x: Int32) -> Int32:
    """Count leading zeros of a 32-bit value; returns 32 when x == 0."""
    # for i in cutlass.range_constexpr(32):
    #     if (1 << (31 - i)) & x:
    #         return Int32(i)
    # return Int32(32)
    # Early exit is not supported yet
    # Since the DSL loop cannot break, `done` latches the first (highest) set
    # bit found while the loop always runs all 32 iterations.
    res = Int32(32)
    done = False
    for i in cutlass.range(32):
        if ((1 << (31 - i)) & x) and not done:
            res = Int32(i)
            done = True
    return res
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/fast_math.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/tile_scheduler.py | # Copyright (c) 2025, Tri Dao.
from typing import Optional, Tuple
from dataclasses import dataclass
try:
from typing import override
except ImportError: # Python < 3.12
from typing_extensions import override
import cutlass
from cutlass._mlir import ir
import cutlass.cute as cute
from cutlass import Int32, const_expr
from cutlass.cute import FastDivmodDivisor
from quack.cute_dsl_utils import ParamsBase
import flash_attn.cute.utils as utils
from flash_attn.cute.fast_math import clz
class WorkTileInfo(cutlass.utils.WorkTileInfo):
    """Altered WorkTileInfo which includes four axes: (block, head, batch, split)"""
    @override
    def __new_from_mlir_values__(self, values: list[ir.Value]) -> "WorkTileInfo":
        # Exactly 5 dynamic values: the 4 coordinate axes plus the validity flag.
        assert len(values) == 5
        # Rebuild the coordinate tuple from the first 4 values and the
        # validity flag from the last one.
        new_tile_idx = cutlass.new_from_mlir_values(self._tile_idx, values[:-1])
        new_is_valid_tile = cutlass.new_from_mlir_values(self._is_valid_tile, [values[-1]])
        return WorkTileInfo(new_tile_idx, new_is_valid_tile)
@dataclass
class TileSchedulerArguments(ParamsBase):
    # Host-side description of the scheduling problem; consumed by the
    # schedulers' Params.create() factories below. Field order matters for
    # positional construction — do not reorder.
    num_block: Int32
    num_head: Int32
    num_batch: Int32
    num_splits: Int32
    seqlen_k: Int32
    headdim: Int32
    headdim_v: Int32
    total_q: Int32
    tile_shape_mn: cutlass.Constexpr[Tuple[int, int]]
    cluster_shape_mn: cutlass.Constexpr[Tuple[int, int]] = (1, 1)
    mCuSeqlensQ: Optional[cute.Tensor] = None
    mSeqUsedQ: Optional[cute.Tensor] = None
    qhead_per_kvhead_packgqa: cutlass.Constexpr[int] = 1
    # Bytes per K/V element (2 for fp16/bf16); used to estimate L2 footprint
    # in the LPT scheduler.
    element_size: cutlass.Constexpr[int] = 2
    is_persistent: cutlass.Constexpr[bool] = False
    lpt: cutlass.Constexpr[bool] = False
    is_split_kv: cutlass.Constexpr[bool] = False
    head_swizzle: cutlass.Constexpr[bool] = False
class SingleTileScheduler:
    """Non-persistent scheduler: one work tile per CTA, taken straight from
    the launch grid (block -> x, head*split -> y, batch -> z)."""
    @dataclass
    class Params(ParamsBase):
        # Device-side parameters; field order matters for positional construction.
        num_block: Int32
        num_head: Int32
        num_batch: Int32
        num_splits: Int32
        num_splits_divmod: FastDivmodDivisor
        is_split_kv: cutlass.Constexpr[bool] = False
        cluster_shape_mn: cutlass.Constexpr[Tuple[int, int]] = (1, 1)
        @staticmethod
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "SingleTileScheduler.Params":
            # Precompute the fast-divmod divisor for splitting the y axis
            # into (head, split) on device.
            return SingleTileScheduler.Params(
                args.num_block,
                args.num_head,
                args.num_batch,
                args.num_splits,
                FastDivmodDivisor(args.num_splits),
                args.is_split_kv,
                args.cluster_shape_mn,
            )
    def __init__(self, params: Params, blk_coord: cute.Coord, *, loc=None, ip=None):
        self.params = params
        self._blk_coord = blk_coord
        # Single-tile scheduler: the one tile is valid only on the first query.
        self._is_first_block = True
        self._loc = loc
        self._ip = ip
    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        return SingleTileScheduler.Params.create(args, loc=loc, ip=ip)
    @staticmethod
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileScheduler":
        # if const_expr(cute.size(params.cluster_shape_mn) == 1):
        #     blk_coord = cute.arch.block_idx()
        # else:
        #     # All CTAs in a cluster must get the same block coordinate
        #     blk_coord = cute.arch.cluster_idx()
        # Temporary set to block_idx until we sort out the best way to handle cluster
        blk_coord = cute.arch.block_idx()
        return SingleTileScheduler(params, blk_coord, loc=loc, ip=ip)
    # called by host
    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        # TODO: this hard-codes the fact that we only use cluster = (1, 1) or (2, 1)
        assert params.cluster_shape_mn[1] == 1, "Only cluster_shape_mn[1] == 1 is supported"
        return (
            cute.round_up(params.num_block, params.cluster_shape_mn[0]),
            params.num_head * params.num_splits,
            params.num_batch,
        )
    def get_current_work(self, *, loc=None, ip=None) -> WorkTileInfo:
        block_idx, head_idx, batch_idx = self._blk_coord
        if const_expr(self.params.is_split_kv):
            # The y launch axis packs head*num_splits; unpack it here.
            head_idx, split_idx = divmod(head_idx, self.params.num_splits_divmod)
        else:
            split_idx = Int32(0)
        return WorkTileInfo(
            (block_idx, head_idx, batch_idx, split_idx),
            self._is_first_block,
        )
    def initial_work_tile_info(self, *, loc=None, ip=None):
        return self.get_current_work(loc=loc, ip=ip)
    def prefetch_next_work(self, *, loc=None, ip=None):
        # Nothing to prefetch: there is only one tile per CTA.
        pass
    def advance_to_next_work(self, *, loc=None, ip=None):
        # After the first tile the scheduler reports no further valid work.
        self._is_first_block = False
    def __extract_mlir_values__(self):
        # Flatten dynamic state into MLIR values for the DSL; remember how
        # many values each object contributed so it can be rebuilt below.
        values, self._values_pos = [], []
        for obj in [self.params, self._blk_coord]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values
    def __new_from_mlir_values__(self, values):
        # Inverse of __extract_mlir_values__: consume values in the same order.
        obj_list = []
        for obj, n_items in zip([self.params, self._blk_coord], self._values_pos):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        return SingleTileScheduler(*(tuple(obj_list)), loc=self._loc)
class StaticPersistentTileScheduler:
    """Persistent scheduler: a fixed 1-D grid of CTAs strides through all
    (block, head, batch) tiles, each CTA advancing by the grid size."""
    @dataclass
    class Params(ParamsBase):
        # Device-side parameters; field order matters for positional construction.
        num_block_cluster_divmod: FastDivmodDivisor
        num_head_divmod: FastDivmodDivisor
        total_blocks_cluster: Int32
        cluster_shape_m: cutlass.Constexpr[int] = 1
        @staticmethod
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "StaticPersistentTileScheduler.Params":
            # Tiles are counted per cluster, not per CTA.
            num_block_cluster = cute.ceil_div(args.num_block, cute.size(args.cluster_shape_mn))
            total_blocks_cluster = num_block_cluster * args.num_head * args.num_batch
            return StaticPersistentTileScheduler.Params(
                FastDivmodDivisor(num_block_cluster),
                FastDivmodDivisor(args.num_head),
                total_blocks_cluster,
                cluster_shape_m=args.cluster_shape_mn[0],
            )
    def __init__(self, params: Params, tile_idx: Int32, *, loc=None, ip=None):
        self.params = params
        # Linear index of the tile this CTA/cluster is currently working on.
        self._tile_idx = tile_idx
        self._loc = loc
        self._ip = ip
    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        return StaticPersistentTileScheduler.Params.create(args, loc=loc, ip=ip)
    @staticmethod
    def create(params: Params, *, loc=None, ip=None) -> "StaticPersistentTileScheduler":
        # Start from this CTA's (or cluster's) position in the 1-D launch grid.
        if const_expr(cute.size(params.cluster_shape_m) == 1):
            tile_idx = cute.arch.block_idx()[0]
        else:
            tile_idx = cute.arch.cluster_idx()[0]
        return StaticPersistentTileScheduler(params, tile_idx, loc=loc, ip=ip)
    # called by host
    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        # Launch at most one CTA per SM (persistent kernel), but never more
        # than there is work for.
        hardware_info = cutlass.utils.HardwareInfo()
        sm_count = hardware_info.get_device_multiprocessor_count()
        # Grid must be a multiple of cluster_shape_m for CUDA cluster launch.
        max_ctas = (sm_count // params.cluster_shape_m) * params.cluster_shape_m
        grid_x = cutlass.min(max_ctas, params.total_blocks_cluster * params.cluster_shape_m)
        return (grid_x, Int32(1), Int32(1))
    # @cute.jit
    def get_current_work(self, *, loc=None, ip=None) -> WorkTileInfo:
        # Decode the linear tile index as batch-major, then head, then block.
        hn_idx, block_idx = divmod(self._tile_idx, self.params.num_block_cluster_divmod)
        batch_idx, head_idx = divmod(hn_idx, self.params.num_head_divmod)
        is_valid = self._tile_idx < self.params.total_blocks_cluster
        # if cute.arch.thread_idx()[0] == 0:
        #     cute.printf("TileScheduler: tile_idx=%d, hn_idx=%d, block_idx=%d, batch_idx=%d, head_idx=%d, is_valid=%d", self._tile_idx, hn_idx, block_idx, batch_idx, head_idx, is_valid)
        return WorkTileInfo(
            (Int32(block_idx), Int32(head_idx), Int32(batch_idx), Int32(0)), is_valid
        )
    def initial_work_tile_info(self, *, loc=None, ip=None):
        return self.get_current_work(loc=loc, ip=ip)
    def prefetch_next_work(self, *, loc=None, ip=None):
        # Static schedule: the next tile index is known, nothing to prefetch.
        pass
    def advance_to_next_work(self, *, loc=None, ip=None):
        # Stride by the total number of CTAs (or clusters) in the launch grid.
        if const_expr(self.params.cluster_shape_m == 1):
            self._tile_idx += cute.arch.grid_dim()[0]
        else:
            self._tile_idx += cute.arch.cluster_dim()[0]
    def __extract_mlir_values__(self):
        # Flatten dynamic state into MLIR values; record per-object counts so
        # __new_from_mlir_values__ can split them back apart.
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values
    def __new_from_mlir_values__(self, values):
        # Inverse of __extract_mlir_values__: consume values in the same order.
        obj_list = []
        for obj, n_items in zip(
            [self.params, self._tile_idx],
            self._values_pos,
        ):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        return StaticPersistentTileScheduler(*(tuple(obj_list)), loc=self._loc)
class SingleTileLPTScheduler:
@dataclass
class Params(ParamsBase):
total_blocks: Int32
num_splits: Int32
num_block: Int32
l2_minor: Int32
num_block_divmod: FastDivmodDivisor
num_head_divmod: FastDivmodDivisor
l2_minor_divmod: FastDivmodDivisor
l2_major_divmod: FastDivmodDivisor
l2_minor_residual_divmod: FastDivmodDivisor
num_hb_quotient: Int32
is_split_kv: cutlass.Constexpr[bool] = False
@staticmethod
@cute.jit
def create(
args: TileSchedulerArguments, *, loc=None, ip=None
) -> "SingleTileLPTScheduler.Params":
# cute.printf(args.num_block, args.num_head, args.num_batch, args.seqlen_k, args.headdim, args.headdim_v, args.total_q, args.tile_shape_mn, args.qhead_per_kvhead_packgqa, args.element_size)
size_one_kv_head = args.seqlen_k * (args.headdim + args.headdim_v) * args.element_size
size_one_head = size_one_kv_head
size_l2 = 50 * 1024 * 1024 # 40 MB for K & V
# Swizzle is the size of each "section". Round swizzle to a power of 2
# Need to be careful about the case where only one head will fit
# swizzle is how many heads can fit in L2
# swizzle = 1 if size_l2 < size_one_head else (size_l2 // size_one_head)
# Seems faster if swizzle if a power of 2
log2_floor = lambda n: 31 - clz(n)
swizzle = 1 if size_l2 < size_one_head else (1 << log2_floor(size_l2 // size_one_head))
# swizzle = 1 if size_l2 < size_one_head else (size_l2 // size_one_head)
# If we're in the last section (called residual), we don't want to divide by
# swizzle. Instead we want to divide by the remainder.
num_hb_quotient = (args.num_head * args.num_batch) // swizzle
num_hb_remainder = (args.num_head * args.num_batch) % swizzle
return SingleTileLPTScheduler.Params(
total_blocks=args.num_block * args.num_head * args.num_batch,
num_block=args.num_block,
l2_minor=Int32(swizzle),
num_block_divmod=FastDivmodDivisor(args.num_block),
num_head_divmod=FastDivmodDivisor(args.num_head),
l2_minor_divmod=FastDivmodDivisor(swizzle),
l2_major_divmod=FastDivmodDivisor(swizzle * args.num_block),
l2_minor_residual_divmod=FastDivmodDivisor(
max(num_hb_remainder, 1)
), # don't divide by 0
num_hb_quotient=Int32(num_hb_quotient),
num_splits=args.num_splits,
is_split_kv=args.is_split_kv,
)
def __init__(self, params: Params, tile_idx: Int32, split_idx: Int32, *, loc=None, ip=None):
self.params = params
self._tile_idx = tile_idx
self._split_idx = split_idx
self._loc = loc
self._ip = ip
    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        """Host-side: convert generic scheduler arguments into precomputed Params."""
        return SingleTileLPTScheduler.Params.create(args, loc=loc, ip=ip)
    @staticmethod
    @cute.jit
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileLPTScheduler":
        """Device-side: build a scheduler seeded with this CTA's (blockIdx.x, blockIdx.y)."""
        tile_idx, split_idx, _ = cute.arch.block_idx()
        return SingleTileLPTScheduler(params, tile_idx, split_idx, loc=loc, ip=ip)
    # called by host
    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        """One CTA per (tile, split): grid is (total_blocks, num_splits, 1)."""
        return (params.total_blocks, params.num_splits, Int32(1))
    @cute.jit
    def get_current_work(self, *, loc=None, ip=None) -> WorkTileInfo:
        """Decode this CTA's linear tile index into (block, head, batch, split).

        The index is first split into an L2 "section" (a run of ``l2_minor``
        (head, batch) slots) and an offset within it, so neighboring CTAs reuse
        the same K/V heads in L2; the block index is then reversed so the
        longest-running tiles are issued first (LPT).
        """
        params = self.params
        # Implement LPT scheduling coordinate calculation
        bidhb, l2_mod = divmod(self._tile_idx, params.l2_major_divmod)
        # If we're in the last section (called residual), we don't want to divide by
        # swizzle. Instead we want to divide by the remainder.
        block, bidhb_residual = 0, 0
        if bidhb < params.num_hb_quotient:
            block, bidhb_residual = divmod(l2_mod, params.l2_minor_divmod)
        else:
            block, bidhb_residual = divmod(l2_mod, params.l2_minor_residual_divmod)
        bidhb_actual = bidhb * params.l2_minor + bidhb_residual
        batch_idx, head_idx = divmod(bidhb_actual, params.num_head_divmod)
        # Longest-processing-time-first
        block = params.num_block - 1 - block
        is_valid = self._tile_idx < params.total_blocks
        return WorkTileInfo(
            (Int32(block), Int32(head_idx), Int32(batch_idx), Int32(self._split_idx)), is_valid
        )
    def initial_work_tile_info(self, *, loc=None, ip=None):
        """First (and only) work tile for this CTA."""
        return self.get_current_work(loc=loc, ip=ip)
    def prefetch_next_work(self, *, loc=None, ip=None):
        """No-op: a single-tile scheduler has no subsequent work to prefetch."""
        pass
    def advance_to_next_work(self, *, loc=None, ip=None):
        # Single tile scheduler - set to invalid tile_idx to indicate no more work
        # (total_blocks fails the is_valid check in get_current_work).
        self._tile_idx = self.params.total_blocks
    def __extract_mlir_values__(self):
        """Flatten (params, tile_idx, split_idx) into a list of MLIR values.

        Records how many values each object contributed in ``_values_pos`` so
        __new_from_mlir_values__ can consume the flat list in the same order.
        """
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx, self._split_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values
    def __new_from_mlir_values__(self, values):
        """Rebuild a scheduler of the same type from values produced by
        __extract_mlir_values__, consuming them per-object in recorded counts."""
        obj_list = []
        for obj, n_items in zip([self.params, self._tile_idx, self._split_idx], self._values_pos):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        return self.__class__(*(tuple(obj_list)), loc=self._loc)
class SingleTileLPTBwdScheduler:
    """Single-tile scheduler for the backward pass with L2 swizzling.

    Each CTA (or M-cluster) handles exactly one (block, head, batch) tile.
    The linear blockIdx.x is decoded so that runs of ``l2_minor`` consecutive
    (head, batch) slots form one L2 "section" (improving K/V reuse); when
    ``spt`` is set, block indices are reversed so the longest-running tiles
    are issued first.
    """

    @dataclass
    class Params(ParamsBase):
        # Host-precomputed constants; the FastDivmodDivisor fields replace
        # runtime integer division in get_current_work.
        total_blocks: Int32
        num_block: Int32
        l2_minor: Int32
        num_head_divmod: FastDivmodDivisor
        l2_minor_divmod: FastDivmodDivisor
        l2_major_divmod: FastDivmodDivisor
        l2_minor_residual_divmod: FastDivmodDivisor
        num_hb_quotient: Int32
        cluster_shape_mn: cutlass.Constexpr[Tuple[int, int]] = (1, 1)
        spt: cutlass.Constexpr[bool] = True

        @staticmethod
        @cute.jit
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "SingleTileLPTBwdScheduler.Params":
            """Compute the L2 swizzle width and divmod helpers from host args."""
            size_l2 = 50 * 1024 * 1024
            size_one_qdo_head = args.seqlen_k * (args.headdim + args.headdim_v) * args.element_size
            # size_one_dqaccum_head = args.seqlen_k * (args.headdim) * 4
            size_one_dqaccum_head = 0
            size_one_head = size_one_qdo_head + size_one_dqaccum_head
            # Round the number of heads that fit in L2 down to a power of 2.
            log2_floor = lambda n: 31 - clz(n)
            swizzle = 1 if size_l2 < size_one_head else (1 << log2_floor(size_l2 // size_one_head))
            # swizzle = 8
            # If we're in the last section (called residual), we don't want to divide by
            # swizzle. Instead we want to divide by the remainder.
            num_hb_quotient = (args.num_head * args.num_batch) // swizzle
            num_hb_remainder = (args.num_head * args.num_batch) % swizzle
            # Count clusters along M; each covers cluster_shape_mn[0] blocks.
            num_block = cute.ceil_div(args.num_block, args.cluster_shape_mn[0])
            return SingleTileLPTBwdScheduler.Params(
                total_blocks=(num_block * args.cluster_shape_mn[0])
                * args.num_head
                * args.num_batch,
                num_block=num_block,
                l2_minor=Int32(swizzle),
                num_head_divmod=FastDivmodDivisor(args.num_head),
                l2_minor_divmod=FastDivmodDivisor(swizzle),
                l2_major_divmod=FastDivmodDivisor(swizzle * num_block),
                l2_minor_residual_divmod=FastDivmodDivisor(
                    max(num_hb_remainder, 1)
                ),  # don't divide by 0
                num_hb_quotient=Int32(num_hb_quotient),
                cluster_shape_mn=args.cluster_shape_mn,
                spt=args.lpt,  # NOTE: the "spt" flag is sourced from args.lpt
            )

    def __init__(self, params: Params, tile_idx: Int32, *, loc=None, ip=None):
        """Store parameters and this CTA's linear tile index."""
        self.params = params
        self._tile_idx = tile_idx
        self._loc = loc
        self._ip = ip

    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        """Host-side: convert generic scheduler arguments into Params."""
        return SingleTileLPTBwdScheduler.Params.create(args, loc=loc, ip=ip)

    @staticmethod
    @cute.jit
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileLPTBwdScheduler":
        """Device-side: build a scheduler seeded with this CTA's blockIdx.x."""
        tile_idx = cute.arch.block_idx()[0]
        return SingleTileLPTBwdScheduler(params, tile_idx, loc=loc, ip=ip)

    # called by host
    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        """One CTA per tile: a 1-D grid of total_blocks."""
        return (params.total_blocks, Int32(1), Int32(1))

    @cute.jit
    def get_current_work(self, *, loc=None, ip=None) -> cutlass.utils.WorkTileInfo:
        """Decode the linear tile index into (block, head, batch, 0) coordinates."""
        cluster_idx = self._tile_idx // self.params.cluster_shape_mn[0]
        params = self.params
        # Implement LPT scheduling coordinate calculation
        bidhb, l2_mod = divmod(cluster_idx, params.l2_major_divmod)
        # If we're in the last section (called residual), we don't want to divide by
        # swizzle. Instead we want to divide by the remainder.
        block, bidhb_residual = 0, 0
        if bidhb < params.num_hb_quotient:
            block, bidhb_residual = divmod(l2_mod, params.l2_minor_divmod)
        else:
            block, bidhb_residual = divmod(l2_mod, params.l2_minor_residual_divmod)
        bidhb_actual = bidhb * params.l2_minor + bidhb_residual
        batch_idx, head_idx = divmod(bidhb_actual, params.num_head_divmod)
        if cutlass.const_expr(params.spt):
            # Longest-processing-time-first: hand out high block indices first.
            block = params.num_block - 1 - block
        if cutlass.const_expr(params.cluster_shape_mn[0] > 1):
            # Expand the cluster index back into this CTA's block index.
            bidx_in_cluster = cute.arch.block_in_cluster_idx()
            block = block * params.cluster_shape_mn[0] + bidx_in_cluster[0]
        is_valid = self._tile_idx < params.total_blocks
        return WorkTileInfo((Int32(block), Int32(head_idx), Int32(batch_idx), Int32(0)), is_valid)

    def initial_work_tile_info(self, *, loc=None, ip=None):
        """First (and only) work tile for this CTA."""
        return self.get_current_work(loc=loc, ip=ip)

    def prefetch_next_work(self, *, loc=None, ip=None):
        """No-op: nothing to prefetch for a single-tile scheduler."""
        pass

    def advance_to_next_work(self, *, loc=None, ip=None):
        # Single tile scheduler - set to invalid tile_idx to indicate no more work
        self._tile_idx = self.params.total_blocks

    def __extract_mlir_values__(self):
        """Flatten (params, tile_idx) into MLIR values, recording per-object counts."""
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        """Rebuild the scheduler from values flattened by __extract_mlir_values__."""
        obj_list = []
        for obj, n_items in zip([self.params, self._tile_idx], self._values_pos):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        return self.__class__(*(tuple(obj_list)), loc=self._loc)
class SingleTileVarlenScheduler:
    """Single-tile scheduler for variable-length (varlen) batches.

    Because sequence lengths differ per batch, the mapping from a linear
    blockIdx.x to (block, head, batch) cannot be precomputed on the host; it
    is recovered at runtime with a warp-cooperative scan over per-batch
    M-block counts (31 batches per scan step).
    """

    @dataclass
    class Params(ParamsBase):
        num_head: Int32
        num_batch: Int32
        total_q: Int32
        num_splits: Int32
        max_kvblock_in_l2: Int32
        tile_shape_mn: cutlass.Constexpr[Tuple[int, int]]
        mCuSeqlensQ: Optional[cute.Tensor] = None
        mSeqUsedQ: Optional[cute.Tensor] = None
        qhead_per_kvhead_packgqa: cutlass.Constexpr[int] = 1
        lpt: cutlass.Constexpr[bool] = False
        is_split_kv: cutlass.Constexpr[bool] = False
        head_swizzle: cutlass.Constexpr[bool] = False
        cluster_shape_m: cutlass.Constexpr[int] = 1

        @staticmethod
        @cute.jit
        def create(
            args: TileSchedulerArguments, *, loc=None, ip=None
        ) -> "SingleTileVarlenScheduler.Params":
            """Validate varlen inputs and precompute the L2 K/V-block budget."""
            size_l2 = 50 * 1024 * 1024  # 50 MB for K & V
            # How many N-tiles of K/V fit in the assumed L2 budget.
            max_kvblock_in_l2 = size_l2 // (
                (args.headdim + args.headdim_v) * args.element_size * args.tile_shape_mn[1]
            )
            assert args.mCuSeqlensQ is not None or args.mSeqUsedQ is not None, (
                "At least one of mCuSeqlensQ or mSeqUsedQ must be provided"
            )
            assert args.cluster_shape_mn[1] == 1, "Only cluster_shape_mn[1] == 1 is supported"
            return SingleTileVarlenScheduler.Params(
                num_head=args.num_head,
                num_batch=args.num_batch,
                total_q=args.total_q,
                num_splits=args.num_splits,
                max_kvblock_in_l2=max_kvblock_in_l2,
                tile_shape_mn=args.tile_shape_mn,
                mCuSeqlensQ=args.mCuSeqlensQ,
                mSeqUsedQ=args.mSeqUsedQ,
                qhead_per_kvhead_packgqa=args.qhead_per_kvhead_packgqa,
                lpt=args.lpt,
                is_split_kv=args.is_split_kv,
                head_swizzle=args.head_swizzle,
                cluster_shape_m=args.cluster_shape_mn[0],
            )

    def __init__(self, params: Params, tile_idx: Int32, split_idx: Int32, *, loc=None, ip=None):
        """Store parameters and this CTA's linear tile / split indices."""
        self.params = params
        self._tile_idx = tile_idx
        self._split_idx = split_idx
        # Flipped by advance_to_next_work so a second query returns invalid.
        self._is_first_block = True
        self._loc = loc
        self._ip = ip

    @staticmethod
    def to_underlying_arguments(args: TileSchedulerArguments, *, loc=None, ip=None) -> Params:
        """Host-side: convert generic scheduler arguments into Params."""
        return SingleTileVarlenScheduler.Params.create(args, loc=loc, ip=ip)

    @staticmethod
    def create(params: Params, *, loc=None, ip=None) -> "SingleTileVarlenScheduler":
        """Device-side: build a scheduler seeded with this CTA's block indices."""
        tile_idx, split_idx, _ = cute.arch.block_idx()
        return SingleTileVarlenScheduler(params, tile_idx, split_idx, loc=loc, ip=ip)

    # called by host
    @staticmethod
    def get_grid_shape(
        params: Params,
        *,
        loc=None,
        ip=None,
    ) -> Tuple[Int32, Int32, Int32]:
        """Upper bound on tiles: each batch may pad up to one full cluster of M-tiles."""
        total_blocks_max = (
            params.total_q
            + params.num_batch * (params.cluster_shape_m * params.tile_shape_mn[0] - 1)
        ) // params.tile_shape_mn[0]
        # round down to nearest multiple of cluster since odd excess is always padding
        total_blocks_max = total_blocks_max // params.cluster_shape_m * params.cluster_shape_m
        return (total_blocks_max * params.num_head, params.num_splits, Int32(1))

    @cute.jit
    def _get_num_m_blocks(self, lane: Int32, bidb_start: Int32) -> Int32:
        """Per-lane count of M-tile clusters for batch (bidb_start + lane).

        Lanes 0..30 each handle one batch; the last lane returns 0 (in the
        cu_seqlens path it only supplies the next cumulative value via the
        shuffle-down below).
        """
        params = self.params
        batch_idx = lane + bidb_start
        if cutlass.const_expr(params.mSeqUsedQ is not None):
            seqlen = Int32(0)
            if batch_idx < params.num_batch:
                seqlen = params.mSeqUsedQ[batch_idx]
        else:
            assert params.mCuSeqlensQ is not None
            cur_cu_seqlen = Int32(0)
            if batch_idx <= params.num_batch:
                cur_cu_seqlen = params.mCuSeqlensQ[batch_idx]
            # seqlen of batch i is cu_seqlens[i + 1] - cu_seqlens[i]; the next
            # lane holds cu_seqlens[i + 1].
            next_cu_seqlen = cute.arch.shuffle_sync_down(cur_cu_seqlen, offset=1)
            seqlen = next_cu_seqlen - cur_cu_seqlen
        if cutlass.const_expr(params.qhead_per_kvhead_packgqa > 1):
            # Packed GQA folds query heads into the sequence dimension.
            seqlen *= params.qhead_per_kvhead_packgqa
        return (
            cute.ceil_div(cute.ceil_div(seqlen, params.tile_shape_mn[0]), params.cluster_shape_m)
            if batch_idx < params.num_batch and lane < cute.arch.WARP_SIZE - 1
            else Int32(0)
        )

    @cute.jit
    def get_current_work(self, *, loc=None, ip=None) -> WorkTileInfo:
        """Warp-cooperatively locate (block, head, batch, split) for this CTA's tile.

        Scans batches 31 at a time until the group containing ``tile_idx`` is
        found, then uses a ballot/popcount to pick the batch within the group.
        """
        params = self.params
        lane_idx = cute.arch.lane_idx()
        num_m_blocks = self._get_num_m_blocks(lane_idx, bidb_start=0)
        num_m_blocks_cumulative = utils.warp_prefix_sum(num_m_blocks, lane_idx)
        # Total number of blocks for the next 31 batches
        m_blocks_in_group = cute.arch.shuffle_sync(num_m_blocks_cumulative, cute.arch.WARP_SIZE - 1)
        # Same for all lanes
        group_end_tile = m_blocks_in_group * params.num_head
        # if cute.arch.thread_idx()[0] == 128 + 31: cute.printf("SingleTileVarlenScheduler: tile_idx=%d, group_end_tile = %d, num_m_blocks=%d, num_m_blocks_cumulative = %d, m_blocks_in_group = %d", self._tile_idx, group_end_tile, num_m_blocks, num_m_blocks_cumulative, m_blocks_in_group)
        block, head_idx, batch_idx = Int32(0), Int32(0), Int32(0)
        next_tile_idx = self._tile_idx // params.cluster_shape_m
        # Advance by groups of 31 batches until the group covering next_tile_idx.
        while group_end_tile <= next_tile_idx:
            batch_idx += cute.arch.WARP_SIZE - 1
            if batch_idx >= params.num_batch:
                # Ran past the last batch: force loop exit with an invalid batch.
                batch_idx = Int32(params.num_batch)
                group_end_tile = next_tile_idx + 1
            else:
                num_m_blocks = self._get_num_m_blocks(lane_idx, bidb_start=batch_idx)
                num_m_blocks_cumulative = utils.warp_prefix_sum(num_m_blocks, lane_idx)
                m_blocks_in_group = cute.arch.shuffle_sync(
                    num_m_blocks_cumulative, cute.arch.WARP_SIZE - 1
                )
                group_end_tile += m_blocks_in_group * params.num_head
        is_valid = False
        if batch_idx >= params.num_batch:
            block, head_idx, batch_idx = Int32(0), Int32(0), Int32(params.num_batch)
        else:
            group_start_tile = group_end_tile - m_blocks_in_group * params.num_head
            # if cute.arch.thread_idx()[0] == 128 + 31: cute.printf("SingleTileVarlenScheduler: tile_idx=%d, group_end_tile = %d, num_m_blocks=%d, batch_idx = %d", self._tile_idx, group_end_tile, num_m_blocks, batch_idx)
            # The next problem to process is the first one that does not have ending tile position
            # that is greater than or equal to tile index.
            batch_idx_in_group = cute.arch.popc(
                cute.arch.vote_ballot_sync(
                    group_start_tile + num_m_blocks_cumulative * params.num_head <= next_tile_idx
                )
            )
            batch_idx += batch_idx_in_group
            num_m_blocks_prev_lane = (
                0
                if batch_idx_in_group == 0
                else cute.arch.shuffle_sync(num_m_blocks_cumulative, batch_idx_in_group - 1)
            )
            num_m_blocks = cute.arch.shuffle_sync(num_m_blocks, batch_idx_in_group)
            mh_block = next_tile_idx - group_start_tile - num_m_blocks_prev_lane * params.num_head
            if cutlass.const_expr(params.lpt or params.head_swizzle):
                # This is a version of the SingleTileLPTScheduler, complicated by the fact that
                # the seqlen can vary per batch.
                # TODO: is there any case where num_m_blocks is 0?
                # TODO: by right we should read the seqlen_kv but we're assuming seqlen_q == seqlen_k here
                num_n_blocks = (
                    num_m_blocks
                    * params.tile_shape_mn[0]
                    // params.qhead_per_kvhead_packgqa
                    // params.tile_shape_mn[1]
                )
                # nheads_in_l2 = min(max(self.max_kvblock_in_l2 // num_n_blocks, 1), self.num_head)
                # Seems faster to have this be a power of 2
                nheads_in_l2 = (
                    16
                    if num_n_blocks * 16 <= params.max_kvblock_in_l2
                    else (
                        8
                        if num_n_blocks * 8 <= params.max_kvblock_in_l2
                        else (
                            4
                            if num_n_blocks * 4 <= params.max_kvblock_in_l2
                            else (2 if num_n_blocks * 2 <= params.max_kvblock_in_l2 else 1)
                        )
                    )
                )
                nheads_in_l2 = min(nheads_in_l2, params.num_head)
                mh_in_l2 = nheads_in_l2 * num_m_blocks
                section_idx = mh_block // mh_in_l2
                l2_mod = mh_block - section_idx * mh_in_l2
                # Deal with tail section
                nheads_in_this_section = (
                    nheads_in_l2
                    if nheads_in_l2 * (section_idx + 1) <= params.num_head
                    else params.num_head - section_idx * nheads_in_l2
                )
                block = l2_mod // nheads_in_this_section
                head_idx_residual = l2_mod - block * nheads_in_this_section
                head_idx = section_idx * nheads_in_l2 + head_idx_residual
                if cutlass.const_expr(params.lpt):
                    block = num_m_blocks - 1 - block
            else:
                head_idx = mh_block // num_m_blocks
                block = mh_block - head_idx * num_m_blocks
            is_valid = self._is_first_block and batch_idx < params.num_batch
            if cutlass.const_expr(params.cluster_shape_m > 1):
                bidx_in_cluster = cute.arch.block_in_cluster_idx()
                block = block * params.cluster_shape_m + bidx_in_cluster[0]
            # if cute.arch.thread_idx()[0] == 128: cute.printf("SingleTileVarlenScheduler: tile_idx=%d, batch_idx=%d, head_idx=%d, block=%d, is_valid = %d", self._tile_idx, batch_idx, head_idx, block, is_valid)
        split_idx = self._split_idx if const_expr(params.is_split_kv) else Int32(0)
        return WorkTileInfo((Int32(block), Int32(head_idx), Int32(batch_idx), split_idx), is_valid)

    def initial_work_tile_info(self, *, loc=None, ip=None):
        """First (and only) work tile for this CTA."""
        return self.get_current_work(loc=loc, ip=ip)

    def prefetch_next_work(self, *, loc=None, ip=None):
        """No-op: nothing to prefetch for a single-tile scheduler."""
        pass

    def advance_to_next_work(self, *, loc=None, ip=None):
        # Single tile scheduler - set to invalid tile_idx to indicate no more work
        self._is_first_block = False

    def __extract_mlir_values__(self):
        """Flatten (params, tile_idx, split_idx) into MLIR values with per-object counts."""
        values, self._values_pos = [], []
        for obj in [self.params, self._tile_idx, self._split_idx]:
            obj_values = cutlass.extract_mlir_values(obj)
            values += obj_values
            self._values_pos.append(len(obj_values))
        return values

    def __new_from_mlir_values__(self, values):
        """Rebuild the scheduler from values flattened by __extract_mlir_values__."""
        obj_list = []
        for obj, n_items in zip(
            [self.params, self._tile_idx, self._split_idx],
            self._values_pos,
        ):
            obj_list.append(cutlass.new_from_mlir_values(obj, values[:n_items]))
            values = values[n_items:]
        return SingleTileVarlenScheduler(*(tuple(obj_list)), loc=self._loc)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/tile_scheduler.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 651,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/blackwell_helpers.py | # Copyright (c) 2025, Tri Dao.
from typing import Optional, Tuple
import cutlass
import cutlass.cute as cute
from cutlass import Int32, Boolean, const_expr
from cutlass.cute.nvgpu import tcgen05
from cutlass._mlir.dialects import llvm
import flash_attn.cute.mma_sm100_desc as sm100_desc
@cute.jit
def gemm_w_idx(
    tiled_mma: cute.TiledMma,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    A_idx: Optional[Int32] = None,
    B_idx: Optional[Int32] = None,
    zero_init: bool | Boolean = False,
    swap_AB: bool = False,
    num_unroll_groups: int = 1,
) -> None:
    """Run the K-loop of MMAs on (optionally stage-indexed) operand fragments.

    If ``A_idx``/``B_idx`` is given, the corresponding operand is first sliced
    at that stage (its last mode). With ``swap_AB`` the roles of A and B (and
    their stage indices) are exchanged. ``zero_init`` clears the accumulator
    on the first K iteration only; ``num_unroll_groups`` splits the K-loop
    unrolling into that many groups.
    """
    if const_expr(swap_AB):
        # Re-enter with operands and their stage indices exchanged.
        # BUGFIX: num_unroll_groups was previously dropped here, silently
        # falling back to 1 whenever swap_AB was set.
        return gemm_w_idx(
            tiled_mma,
            acc,
            tCrB,
            tCrA,
            B_idx,
            A_idx,
            zero_init=zero_init,
            swap_AB=False,
            num_unroll_groups=num_unroll_groups,
        )
    else:
        rA = tCrA if const_expr(A_idx is None) else tCrA[None, None, None, A_idx]
        rB = tCrB if const_expr(B_idx is None) else tCrB[None, None, None, B_idx]
        mma_atom = cute.make_mma_atom(tiled_mma.op)
        for k in cutlass.range(
            cute.size(tCrA.shape[2]), unroll=cute.size(tCrA.shape[2]) // num_unroll_groups
        ):
            # Only the very first MMA may clear the accumulator.
            mma_atom.set(tcgen05.Field.ACCUMULATE, not zero_init or k != 0)
            cute.gemm(mma_atom, acc, rA[None, None, k], rB[None, None, k], acc)
@cute.jit
def gemm_ptx_w_idx(
    tiled_mma: cute.TiledMma,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    sA: Optional[cute.Tensor],
    sB: cute.Tensor,
    A_idx: Optional[Int32] = None,
    B_idx: Optional[Int32] = None,
    zero_init: bool | Boolean = False,
    cta_group: int = 1,
    **kwargs,
) -> None:
    """Slice the operands at the optional stage indices and dispatch to
    :func:`gemm_ptx_partial`, which emits the MMAs as inline PTX.

    Extra ``kwargs`` (e.g. mbarrier arguments) are forwarded unchanged.
    """
    # Pick the requested pipeline stage (last mode) if an index is given.
    rA = tCrA if const_expr(A_idx is None) else tCrA[None, None, None, A_idx]
    rB = tCrB if const_expr(B_idx is None) else tCrB[None, None, None, B_idx]
    sA_cur = None
    if const_expr(sA is not None):
        sA_cur = sA if const_expr(A_idx is None) else sA[None, None, None, A_idx]
    sB_cur = sB if const_expr(B_idx is None) else sB[None, None, None, B_idx]
    mma_atom = cute.make_mma_atom(tiled_mma.op)
    # gemm_ptx_partial takes the raw accumulator TMEM address, not the tensor.
    acc_tmem_addr = acc.iterator.toint()
    gemm_ptx_partial(
        mma_atom.op,
        acc_tmem_addr,
        rA,
        rB,
        sA_cur,
        sB_cur,
        zero_init=zero_init,
        cta_group=cta_group,
        **kwargs,
    )
@cute.jit
def gemm(
    tiled_mma: cute.TiledMma,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    zero_init: bool | Boolean = False,
) -> None:
    """Issue one MMA per K-tile, accumulating into ``acc``.

    When ``zero_init`` is set, the ACCUMULATE field is cleared for the first
    K iteration only, so ``acc`` starts from zero; every later iteration
    accumulates.
    """
    atom = cute.make_mma_atom(tiled_mma.op)
    num_k_tiles = cute.size(tCrA.shape[2])
    for k_tile in cutlass.range_constexpr(num_k_tiles):
        # Only the first MMA of the loop may clear the accumulator.
        atom.set(tcgen05.Field.ACCUMULATE, not zero_init or k_tile != 0)
        cute.gemm(atom, acc, tCrA[None, None, k_tile], tCrB[None, None, k_tile], acc)
def i64_to_i32x2(i: int) -> Tuple[int, int]:
    """Split a 64-bit integer into its (low, high) 32-bit halves."""
    mask = (1 << 32) - 1
    return i & mask, (i >> 32) & mask
@cute.jit
def gemm_ptx(
    op: cute.nvgpu.tcgen05.mma.MmaOp,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    sA: Optional[cute.Tensor],
    sB: cute.Tensor,
    zero_init: bool | Boolean = False,
) -> None:
    """Issue one tcgen05.mma per K-tile via hand-written inline PTX.

    The SMEM matrix descriptors are built from a compile-time base (layout +
    swizzle) with the per-K-tile byte offset folded into the low 32 bits; a
    single elected thread issues each MMA. When ``op.a_src`` is TMEM, operand
    A is addressed directly ([$1]) instead of through an SMEM descriptor.
    """
    is_ts = op.a_src == cute.nvgpu.tcgen05.OperandSource.TMEM
    if const_expr(not is_ts):
        assert sA is not None, "sA must be provided when a_src is not TMEM"
    sA_layout = sA.layout if sA is not None else None
    sB_layout = sB.layout
    # Instruction descriptor is fully determined by the MMA op at compile time.
    idesc: int = const_expr(sm100_desc.mma_op_to_idesc(op))
    if const_expr(not is_ts):
        sA_swizzle = sA.iterator.type.swizzle_type
        smem_desc_base_a: int = const_expr(
            sm100_desc.make_smem_desc_base(
                cute.recast_layout(128, op.a_dtype.width, sA_layout[0]),
                sA_swizzle,
                sm100_desc.Major.K
                if const_expr(op.a_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
                else sm100_desc.Major.MN,
            )
        )
        smem_desc_base_a_lo, smem_desc_a_hi = i64_to_i32x2(smem_desc_base_a)
        smem_desc_base_a_lo = const_expr(smem_desc_base_a_lo)
        smem_desc_a_hi = const_expr(smem_desc_a_hi)
    else:
        smem_desc_base_a = None
        smem_desc_base_a_lo, smem_desc_a_hi = None, None
    sB_swizzle = sB.iterator.type.swizzle_type
    smem_desc_base_b: int = const_expr(
        sm100_desc.make_smem_desc_base(
            cute.recast_layout(128, op.b_dtype.width, sB_layout[0]),
            sB_swizzle,
            sm100_desc.Major.K
            if const_expr(op.b_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
            else sm100_desc.Major.MN,
        )
    )
    smem_desc_base_b_lo, smem_desc_b_hi = i64_to_i32x2(smem_desc_base_b)
    smem_desc_base_b_lo = const_expr(smem_desc_base_b_lo)
    smem_desc_b_hi = const_expr(smem_desc_b_hi)
    # Fold the runtime SMEM start address into the descriptor's low word.
    if const_expr(not is_ts):
        smem_desc_start_a_lo = Int32(smem_desc_base_a_lo) | sm100_desc.make_smem_desc_start_addr(
            sA[None, None, 0].iterator
        )
    else:
        smem_desc_start_a_lo = None
    smem_desc_start_b_lo = Int32(smem_desc_base_b_lo) | sm100_desc.make_smem_desc_start_addr(
        sB[None, None, 0].iterator
    )
    for k in cutlass.range_constexpr(cute.size(tCrA.shape[2])):
        # Descriptor addresses are in 16-byte units, hence the >> 4.
        if const_expr(not is_ts):
            smem_desc_a_lo = smem_desc_start_a_lo + (
                (cute.crd2idx((0, 0, k), sA_layout) * sA.element_type.width // 8) >> 4
            )
        smem_desc_b_lo = smem_desc_start_b_lo + (
            (cute.crd2idx((0, 0, k), sB_layout) * sB.element_type.width // 8) >> 4
        )
        # with cute.arch.elect_one():
        #     cute.printf("smem_desc_a_lo = {}, smem_desc_b_lo = {}", smem_desc_a_lo, smem_desc_b_lo)
        #     cute.printf("smem_desc_a_lo_correct = {}, smem_desc_b_lo_correct = {}", smem_desc_a_lo_correct, smem_desc_b_lo_correct)
        with cute.arch.elect_one():
            if const_expr(not is_ts):
                # SMEM x SMEM MMA: both operands via 64-bit descriptors.
                llvm.inline_asm(
                    None,
                    [
                        acc.iterator.toint().ir_value(),
                        smem_desc_a_lo.ir_value(),
                        smem_desc_b_lo.ir_value(),
                        Int32(not zero_init or k != 0).ir_value(),
                    ],
                    "{\n\t"
                    ".reg .pred p;\n\t"
                    ".reg .b64 smem_desc_a, smem_desc_b;\n\t"
                    ".reg .b32 idesc;\n\t"
                    f"mov.b32 idesc, {hex(idesc)};\n\t"
                    f"mov.b64 smem_desc_a, {{$1, {hex(smem_desc_a_hi)}}};\n\t"
                    f"mov.b64 smem_desc_b, {{$2, {hex(smem_desc_b_hi)}}};\n\t"
                    "setp.ne.b32 p, $3, 0;\n\t"
                    f"tcgen05.mma.cta_group::1.kind::f16 [$0], smem_desc_a, smem_desc_b, idesc, p;\n\t"
                    "}\n",
                    "r,r,r,r",
                    has_side_effects=True,
                    is_align_stack=False,
                    asm_dialect=llvm.AsmDialect.AD_ATT,
                )
            else:
                # TMEM x SMEM MMA: operand A addressed directly in TMEM.
                llvm.inline_asm(
                    None,
                    [
                        acc.iterator.toint().ir_value(),
                        tCrA[None, None, k].iterator.toint().ir_value(),
                        smem_desc_b_lo.ir_value(),
                        Int32(not zero_init or k != 0).ir_value(),
                    ],
                    "{\n\t"
                    ".reg .pred p;\n\t"
                    ".reg .b64 smem_desc_b;\n\t"
                    f"mov.b64 smem_desc_b, {{$2, {hex(smem_desc_b_hi)}}};\n\t"
                    "setp.ne.b32 p, $3, 0;\n\t"
                    f"tcgen05.mma.cta_group::1.kind::f16 [$0], [$1], smem_desc_b, {hex(idesc)}, p;\n\t"
                    "}\n",
                    "r,r,r,r",
                    has_side_effects=True,
                    is_align_stack=False,
                    asm_dialect=llvm.AsmDialect.AD_ATT,
                )
@cute.jit
def gemm_ptx_loop(
    op: cute.nvgpu.tcgen05.mma.MmaOp,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    sA: Optional[cute.Tensor],
    sB: cute.Tensor,
    zero_init: bool | Boolean = False,
) -> None:
    """Like :func:`gemm_ptx`, but emits the whole K-loop as one inline-PTX blob.

    The per-K-tile descriptor deltas are compile-time constants, so the asm
    advances the descriptors' low words with add.u32 between MMAs instead of
    recomputing them; one elected thread (leader_thread) issues all MMAs.
    """
    is_ts = op.a_src == cute.nvgpu.tcgen05.OperandSource.TMEM
    if const_expr(not is_ts):
        assert sA is not None, "sA must be provided when a_src is not TMEM"
    sA_layout = sA.layout if sA is not None else tCrA.layout
    sB_layout = sB.layout
    idesc: int = const_expr(sm100_desc.mma_op_to_idesc(op))
    if const_expr(not is_ts):
        sA_swizzle = sA.iterator.type.swizzle_type
        smem_desc_base_a: int = const_expr(
            sm100_desc.make_smem_desc_base(
                cute.recast_layout(128, op.a_dtype.width, sA_layout[0]),
                sA_swizzle,
                sm100_desc.Major.K
                if const_expr(op.a_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
                else sm100_desc.Major.MN,
            )
        )
        smem_desc_base_a_lo, smem_desc_a_hi = i64_to_i32x2(smem_desc_base_a)
        smem_desc_base_a_lo = const_expr(smem_desc_base_a_lo)
        smem_desc_a_hi = const_expr(smem_desc_a_hi)
    else:
        smem_desc_base_a = None
        smem_desc_base_a_lo, smem_desc_a_hi = None, None
    sB_swizzle = sB.iterator.type.swizzle_type
    smem_desc_base_b: int = const_expr(
        sm100_desc.make_smem_desc_base(
            cute.recast_layout(128, op.b_dtype.width, sB_layout[0]),
            sB_swizzle,
            sm100_desc.Major.K
            if const_expr(op.b_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
            else sm100_desc.Major.MN,
        )
    )
    smem_desc_base_b_lo, smem_desc_b_hi = i64_to_i32x2(smem_desc_base_b)
    smem_desc_base_b_lo = const_expr(smem_desc_base_b_lo)
    smem_desc_b_hi = const_expr(smem_desc_b_hi)
    # Compile-time per-K-tile offsets: SMEM offsets in 16-byte descriptor
    # units; TMEM offsets in 32-bit columns.
    if const_expr(not is_ts):
        offset_a = [
            (cute.crd2idx((0, 0, k), sA_layout) * sA.element_type.width // 8) >> 4
            for k in cutlass.range_constexpr(cute.size(tCrA.shape[2]))
        ]
    else:
        offset_a = [
            cute.crd2idx((0, 0, k), sA_layout) * op.a_dtype.width // 32
            for k in cutlass.range_constexpr(cute.size(tCrA.shape[2]))
        ]
    offset_a_diff = [
        offset_a[k] - offset_a[k - 1] for k in cutlass.range_constexpr(1, cute.size(tCrA.shape[2]))
    ]
    offset_b = [
        (cute.crd2idx((0, 0, k), sB_layout) * sB.element_type.width // 8) >> 4
        for k in cutlass.range_constexpr(cute.size(tCrB.shape[2]))
    ]
    offset_b_diff = [
        offset_b[k] - offset_b[k - 1] for k in cutlass.range_constexpr(1, cute.size(tCrB.shape[2]))
    ]
    if const_expr(not is_ts):
        smem_desc_start_a_lo = Int32(
            smem_desc_base_a_lo | sm100_desc.make_smem_desc_start_addr(sA[None, None, 0].iterator)
        )
    else:
        smem_desc_start_a_lo = None
    smem_desc_start_b_lo = Int32(
        smem_desc_base_b_lo | sm100_desc.make_smem_desc_start_addr(sB[None, None, 0].iterator)
    )
    # First MMA's accumulate flag: runtime predicate "p" only if zero_init is
    # a dynamic Boolean, otherwise baked in as a literal 0/1.
    pred_str = "p" if isinstance(zero_init, Boolean) else "0" if zero_init else "1"
    if const_expr(not is_ts):
        llvm.inline_asm(
            None,
            [
                acc.iterator.toint().ir_value(),
                Int32(cute.arch.make_warp_uniform(smem_desc_start_a_lo)).ir_value(),
                Int32(cute.arch.make_warp_uniform(smem_desc_start_b_lo)).ir_value(),
                Int32(not zero_init).ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 smem_desc_a_lo, smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_a_hi, smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_a, smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            "mov.b32 smem_desc_a_lo, $1;\n\t"
            "mov.b32 smem_desc_b_lo, $2;\n\t"
            f"mov.b32 smem_desc_a_hi, {hex(smem_desc_a_hi)};\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_a, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $3, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [$0], smem_desc_a, smem_desc_b, idesc, {pred_str};\n\t"
            + "".join(
                (
                    f"add.u32 smem_desc_a_lo, smem_desc_a_lo, {hex(offset_a_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"mov.b64 smem_desc_a, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [$0], smem_desc_a, smem_desc_b, idesc, 1;\n\t"
                )
                for k in cutlass.range_constexpr(1, cute.size(tCrA.shape[2]))
            )
            + "}\n",
            "r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    else:
        llvm.inline_asm(
            None,
            [
                acc.iterator.toint().ir_value(),
                Int32(tCrA[None, None, 0].iterator.toint()).ir_value(),
                Int32(smem_desc_start_b_lo).ir_value(),
                Int32(not zero_init).ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_a;\n\t"
            ".reg .b32 smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            "mov.b32 tmem_a, $1;\n\t"
            "mov.b32 smem_desc_b_lo, $2;\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $3, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [$0], [tmem_a], smem_desc_b, idesc, {pred_str};\n\t"
            + "".join(
                (
                    # f"add.u32 tmem_a, tmem_a, {hex(offset_a_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    # f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [$0], [tmem_a], smem_desc_b, idesc, 1;\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [$0], [tmem_a + {hex(offset_a[k])}], smem_desc_b, idesc, 1;\n\t"
                )
                for k in cutlass.range_constexpr(1, cute.size(tCrA.shape[2]))
            )
            + "}\n",
            "r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
@cute.jit
def gemm_ptx_partial(
    op: cute.nvgpu.tcgen05.mma.MmaOp,
    acc_tmem_addr: Int32,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    sA: Optional[cute.Tensor],
    sB: cute.Tensor,
    mbar_ptr: Optional[cutlass.Pointer] = None,
    mbar_phase: Optional[Int32] = None,
    split_arrive: Optional[int] = None,
    zero_init: bool | Boolean = False,
    # sA_offset: Int32 = 0,
    # acc_offset: Int32 = 0,
    tA_addr: Optional[Int32] = None,
    cta_group: int = 1,
) -> None:
    """Emit the K-loop of tcgen05 MMAs as one inline-PTX blob, taking the
    accumulator TMEM address directly.

    Differences from :func:`gemm_ptx_loop`: ``cta_group`` is parameterized,
    and when ``mbar_ptr`` is given the loop is split at K-tile
    ``split_arrive // op.shape_mnk[2]`` with an mbarrier try_wait spin
    inserted between the two halves (phase ``mbar_phase``). For the TMEM-A
    path, pass ``tA_addr`` explicitly — see NOTE below.
    """
    # acc_tmem_addr += acc_offset
    is_ts = op.a_src == cute.nvgpu.tcgen05.OperandSource.TMEM
    if const_expr(not is_ts):
        assert sA is not None, "sA must be provided when a_src is not TMEM"
    sA_layout = sA.layout if sA is not None else tCrA.layout
    sB_layout = sB.layout
    idesc: int = const_expr(sm100_desc.mma_op_to_idesc(op))
    if const_expr(not is_ts):
        sA_swizzle = sA.iterator.type.swizzle_type
        smem_desc_base_a: int = const_expr(
            sm100_desc.make_smem_desc_base(
                cute.recast_layout(128, op.a_dtype.width, sA_layout[0]),
                sA_swizzle,
                sm100_desc.Major.K
                if const_expr(op.a_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
                else sm100_desc.Major.MN,
            )
        )
        smem_desc_base_a_lo, smem_desc_a_hi = i64_to_i32x2(smem_desc_base_a)
        smem_desc_base_a_lo = const_expr(smem_desc_base_a_lo)
        smem_desc_a_hi = const_expr(smem_desc_a_hi)
    else:
        smem_desc_base_a = None
        smem_desc_base_a_lo, smem_desc_a_hi = None, None
    sB_swizzle = sB.iterator.type.swizzle_type
    smem_desc_base_b: int = const_expr(
        sm100_desc.make_smem_desc_base(
            cute.recast_layout(128, op.b_dtype.width, sB_layout[0]),
            sB_swizzle,
            sm100_desc.Major.K
            if const_expr(op.b_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
            else sm100_desc.Major.MN,
        )
    )
    smem_desc_base_b_lo, smem_desc_b_hi = i64_to_i32x2(smem_desc_base_b)
    smem_desc_base_b_lo = const_expr(smem_desc_base_b_lo)
    smem_desc_b_hi = const_expr(smem_desc_b_hi)
    # View TMEM-A in 32-bit columns so crd2idx yields column offsets.
    tCrA_layout = (
        tCrA.layout
        if const_expr(not is_ts)
        else cute.recast_layout(32, tCrA.element_type.width, tCrA.layout)
    )
    # Compile-time per-K-tile offsets (here taken directly from the layouts).
    offset_a = [cute.crd2idx((0, 0, k), tCrA_layout) for k in range(cute.size(tCrA.shape[2]))]
    offset_a_diff = [offset_a[k] - offset_a[k - 1] for k in range(1, cute.size(tCrA.shape[2]))]
    offset_b = [cute.crd2idx((0, 0, k), tCrB.layout) for k in range(cute.size(tCrB.shape[2]))]
    offset_b_diff = [offset_b[k] - offset_b[k - 1] for k in range(1, cute.size(tCrB.shape[2]))]
    if const_expr(not is_ts):
        smem_desc_start_a_lo = Int32(
            smem_desc_base_a_lo | sm100_desc.make_smem_desc_start_addr(sA[None, None, 0].iterator)
        )
        # ) + sA_offset
    else:
        smem_desc_start_a_lo = None
    smem_desc_start_b_lo = Int32(
        smem_desc_base_b_lo | sm100_desc.make_smem_desc_start_addr(sB[None, None, 0].iterator)
    )
    # First MMA's accumulate flag: runtime predicate "p" only if zero_init is
    # a dynamic Boolean, otherwise a baked-in literal 0/1.
    pred_str = "p" if isinstance(zero_init, Boolean) else "0" if zero_init else "1"
    if const_expr(not is_ts):
        assert mbar_ptr is None, "mbar_ptr must be None when a_src is not TMEM"
        llvm.inline_asm(
            None,
            [
                # acc.iterator.toint().ir_value(),
                Int32(cute.arch.make_warp_uniform(smem_desc_start_a_lo)).ir_value(),
                Int32(cute.arch.make_warp_uniform(smem_desc_start_b_lo)).ir_value(),
                Int32(not zero_init).ir_value(),
                Int32(cute.arch.make_warp_uniform(acc_tmem_addr)).ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 smem_desc_a_lo_start, smem_desc_b_lo_start;\n\t"
            ".reg .b32 smem_desc_a_lo, smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_a_hi, smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_a, smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            # f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            f"mov.b32 tmem_acc, $3;\n\t"
            "mov.b32 smem_desc_a_lo_start, $0;\n\t"
            "mov.b32 smem_desc_b_lo_start, $1;\n\t"
            f"mov.b32 smem_desc_a_hi, {hex(smem_desc_a_hi)};\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_a, {{smem_desc_a_lo_start, smem_desc_a_hi}};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo_start, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $2, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], smem_desc_a, smem_desc_b, idesc, {pred_str};\n\t"
            + "".join(
                (
                    # f"add.u32 smem_desc_a_lo, smem_desc_a_lo, {hex(offset_a_diff[k - 1])};\n\t"
                    # f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_a_lo, smem_desc_a_lo_start, {hex(offset_a[k])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                    f"mov.b64 smem_desc_a, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], smem_desc_a, smem_desc_b, idesc, 1;\n\t"
                )
                for k in range(1, cute.size(tCrA.shape[2]))
            )
            + "}\n",
            # "r,r,r",
            "r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    else:
        # For TS gemm, somehow tCrA.iterator.toint() returns 0 no matter what, so we need to
        # explicitly pass in the tA_addr for correctness.
        tA_addr = tCrA[None, None, 0].iterator.toint() if tA_addr is None else tA_addr
        input_args = [
            # Int32(cute.arch.make_warp_uniform(tCrA[None, None, 0].iterator.toint())).ir_value(),
            Int32(cute.arch.make_warp_uniform(tA_addr)).ir_value(),
            Int32(cute.arch.make_warp_uniform(smem_desc_start_b_lo)).ir_value(),
            Int32(not zero_init).ir_value(),
            Int32(cute.arch.make_warp_uniform(acc_tmem_addr)).ir_value(),
        ]
        if const_expr(mbar_ptr is not None):
            assert mbar_phase is not None, "mbar_phase must be provided when mbar_ptr is not None"
            assert split_arrive is not None, (
                "split_arrive must be provided when mbar_ptr is not None"
            )
            # Convert the split point from K elements to K-tile index.
            split_arrive_idx = split_arrive // op.shape_mnk[2]
            input_args.append(mbar_ptr.toint().ir_value())
            input_args.append(Int32(mbar_phase).ir_value())
            # Spin on the mbarrier phase before issuing the second half of the loop.
            mbar_wait_str = (
                ".reg .pred P1; \n\t"
                "LAB_WAIT: \n\t"
                "mbarrier.try_wait.parity.shared::cta.b64 P1, [$4], $5, 10000000; \n\t"
                "@P1 bra DONE; \n\t"
                "bra LAB_WAIT; \n\t"
                "DONE: \n\t"
            )
        else:
            mbar_wait_str = ""
        llvm.inline_asm(
            None,
            # [
            #     # acc.iterator.toint().ir_value(),
            #     Int32(tCrA[None, None, 0].iterator.toint()).ir_value(),
            #     Int32(smem_desc_start_b_lo).ir_value(),
            #     Int32(not zero_init).ir_value(),
            # ],
            input_args,
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 tmem_a;\n\t"
            ".reg .b32 smem_desc_b_lo_start;\n\t"
            ".reg .b32 smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            # f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            f"mov.b32 tmem_acc, $3;\n\t"
            f"mov.b32 tmem_a, $0;\n\t"
            f"mov.b32 smem_desc_b_lo_start, $1;\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo_start, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $2, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], [tmem_a], smem_desc_b, idesc, {pred_str};\n\t"
            + "".join(
                (
                    # f"add.u32 tmem_a, tmem_a, {hex(offset_a_diff[k - 1])};\n\t"
                    # f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    # f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [tmem_acc], [tmem_a], smem_desc_b, idesc, 1;\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], [tmem_a + {hex(offset_a[k])}], smem_desc_b, idesc, 1;\n\t"
                )
                for k in range(
                    1,
                    cute.size(tCrA.shape[2]) if const_expr(mbar_ptr is None) else split_arrive_idx,
                )
            )
            + mbar_wait_str
            + (
                "".join(
                    (
                        f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                        f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                        f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], [tmem_a + {hex(offset_a[k])}], smem_desc_b, idesc, 1;\n\t"
                    )
                    for k in range(split_arrive_idx, cute.size(tCrA.shape[2]))
                )
                if const_expr(mbar_ptr is not None)
                else ""
            )
            + "}\n",
            "r,r,r,r" if const_expr(mbar_ptr is None) else "r,r,r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
@cute.jit
def gemm_ptx_partial1(
    op: cute.nvgpu.tcgen05.mma.MmaOp,
    acc_tmem_addr: cutlass.Constexpr[int],
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    sA_base_addr_for_desc: Int32,
    sA_addr_offset_for_desc: cutlass.Constexpr[int],
    sA_stage: Int32,
    sB_base_addr_for_desc: Int32,
    sB_addr_offset_for_desc: cutlass.Constexpr[int],
    sB_stage: Int32,
    sA_layout: Optional[cute.Layout],
    sB_layout: Optional[cute.Layout],
    sA_swizzle: Optional[cute.Swizzle],
    sB_swizzle: cute.Swizzle,
    zero_init: bool | Boolean = False,
) -> None:
    """Issue one full K-tile sweep of tcgen05.mma instructions via inline PTX.

    Two operand-source modes, selected by ``op.a_src``:

    - SS (A in smem): both A and B are passed as 64-bit smem matrix
      descriptors.  The per-stage smem address is folded into the low word
      with ``mad.lo.u32 desc_lo = stage * per_stage_offset + base``.
    - TS (A in tmem): A is addressed directly in tensor memory and only B
      uses an smem descriptor.

    Only the elected leader thread of the warp issues the MMA instructions.
    The first MMA honors ``zero_init`` (overwrite vs accumulate); all later
    k-tiles always accumulate.

    :param acc_tmem_addr: compile-time tmem column address of the accumulator.
    :param zero_init: if True, the first MMA overwrites the accumulator.  May
        be a runtime ``Boolean``, in which case the predicate is computed in
        PTX (``setp.ne.b32``).
    """
    is_ts = op.a_src == cute.nvgpu.tcgen05.OperandSource.TMEM
    if const_expr(not is_ts):
        assert sA_layout is not None, "sA_layout must be provided when a_src is not TMEM"
        assert sA_swizzle is not None, "sA_swizzle must be provided when a_src is not TMEM"
    # The instruction descriptor is fully determined by the MMA op at compile time.
    idesc: int = const_expr(sm100_desc.mma_op_to_idesc(op))
    if const_expr(not is_ts):
        smem_desc_base_a: int = const_expr(
            sm100_desc.make_smem_desc_base(
                cute.recast_layout(128, op.a_dtype.width, sA_layout[0]),
                sA_swizzle,
                sm100_desc.Major.K
                if const_expr(op.a_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
                else sm100_desc.Major.MN,
            )
        )
        smem_desc_base_a_lo, smem_desc_a_hi = i64_to_i32x2(smem_desc_base_a)
        smem_desc_base_a_lo = const_expr(smem_desc_base_a_lo)
        smem_desc_a_hi = const_expr(smem_desc_a_hi)
    else:
        smem_desc_base_a = None
        smem_desc_base_a_lo, smem_desc_a_hi = None, None
    smem_desc_base_b: int = const_expr(
        sm100_desc.make_smem_desc_base(
            cute.recast_layout(128, op.b_dtype.width, sB_layout[0]),
            sB_swizzle,
            sm100_desc.Major.K
            if const_expr(op.b_major_mode == cute.nvgpu.tcgen05.mma.OperandMajorMode.K)
            else sm100_desc.Major.MN,
        )
    )
    smem_desc_base_b_lo, smem_desc_b_hi = i64_to_i32x2(smem_desc_base_b)
    smem_desc_base_b_lo = const_expr(smem_desc_base_b_lo)
    smem_desc_b_hi = const_expr(smem_desc_b_hi)
    mask = [Int32(0)] * 4
    if const_expr(not is_ts):
        # smem descriptor address offsets are in units of 16 bytes (hence >> 4).
        offset_a = [
            (cute.crd2idx((0, 0, k), sA_layout) * op.a_dtype.width // 8) >> 4
            for k in range(cute.size(tCrA.shape[2]))
        ]
    else:
        # tmem addresses advance in units of 32-bit columns.
        # NOTE(review): this path still indexes sA_layout even though A lives in
        # tmem — assumes the caller passes A's tmem layout here; confirm.
        offset_a = [
            cute.crd2idx((0, 0, k), sA_layout) * op.a_dtype.width // 32
            for k in range(cute.size(tCrA.shape[2]))
        ]
    offset_a_diff = [offset_a[k] - offset_a[k - 1] for k in range(1, cute.size(tCrA.shape[2]))]
    offset_b = [
        (cute.crd2idx((0, 0, k), sB_layout) * op.b_dtype.width // 8) >> 4
        for k in range(cute.size(tCrB.shape[2]))
    ]
    offset_b_diff = [offset_b[k] - offset_b[k - 1] for k in range(1, cute.size(tCrB.shape[2]))]
    if const_expr(not is_ts):
        smem_desc_start_a_lo = const_expr(smem_desc_base_a_lo)
    else:
        smem_desc_start_a_lo = None
    # NOTE(review): the start address is not OR'd into the low word here (the
    # SS branch folds it in via mad.lo.u32 instead); the TS branch passes this
    # base value through unchanged — confirm intended.
    smem_desc_start_b_lo = const_expr(smem_desc_base_b_lo)
    # "p": predicate computed at runtime from zero_init; "0"/"1": constant.
    pred_str = "p" if isinstance(zero_init, Boolean) else "0" if zero_init else "1"
    if const_expr(not is_ts):
        llvm.inline_asm(
            None,
            [
                # $0..$8: A desc base addr, A stage, B desc base addr, B stage,
                # accumulate flag, 4 instruction masks.
                Int32(sA_base_addr_for_desc).ir_value(),
                Int32(sA_stage).ir_value(),
                Int32(sB_base_addr_for_desc).ir_value(),
                Int32(sB_stage).ir_value(),
                Int32(not zero_init).ir_value(),
                mask[0].ir_value(),
                mask[1].ir_value(),
                mask[2].ir_value(),
                mask[3].ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 smem_desc_a_lo, smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_a_hi, smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_a, smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            # desc_lo = stage * per-stage offset + base address bits
            f"mad.lo.u32 smem_desc_a_lo, $1, {hex(sA_addr_offset_for_desc)}, $0;\n\t"
            f"mad.lo.u32 smem_desc_b_lo, $3, {hex(sB_addr_offset_for_desc)}, $2;\n\t"
            f"mov.b32 smem_desc_a_hi, {hex(smem_desc_a_hi)};\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_a, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $4, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [tmem_acc], smem_desc_a, smem_desc_b, idesc, {{$5, $6, $7, $8}}, {pred_str};\n\t"
            + "".join(
                (
                    f"add.u32 smem_desc_a_lo, smem_desc_a_lo, {hex(offset_a_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"mov.b64 smem_desc_a, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [tmem_acc], smem_desc_a, smem_desc_b, idesc, {{$5, $6, $7, $8}}, 1;\n\t"
                )
                for k in range(1, cute.size(tCrA.shape[2]))
            )
            + "}\n",
            "r,r,r,r,r,r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    else:
        # BUGFIX: the accumulator operand had been removed from the input list
        # but the PTX still referenced $0 as the accumulator, shifting every
        # operand index off by one and leaving 8 constraints for 7 operands.
        # The accumulator is a compile-time constant (acc_tmem_addr), so embed
        # it with hex() exactly as the SS branch does, and renumber $0..$6.
        llvm.inline_asm(
            None,
            [
                # $0..$6: A tmem addr, B desc lo, accumulate flag, 4 masks.
                Int32(tCrA[None, None, 0].iterator.toint()).ir_value(),
                Int32(smem_desc_start_b_lo).ir_value(),
                Int32(not zero_init).ir_value(),
                mask[0].ir_value(),
                mask[1].ir_value(),
                mask[2].ir_value(),
                mask[3].ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 tmem_a;\n\t"
            ".reg .b32 smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            f"mov.b32 tmem_a, $0;\n\t"
            f"mov.b32 smem_desc_b_lo, $1;\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $2, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [tmem_acc], [tmem_a], smem_desc_b, idesc, {{$3, $4, $5, $6}}, {pred_str};\n\t"
            + "".join(
                (
                    f"add.u32 tmem_a, tmem_a, {hex(offset_a_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [tmem_acc], [tmem_a], smem_desc_b, idesc, {{$3, $4, $5, $6}}, 1;\n\t"
                )
                for k in range(1, cute.size(tCrA.shape[2]))
            )
            + "}\n",
            "r,r,r,r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
@cute.jit
def gemm_ptx_precomputed(
    acc_tmem_addr: Int32,
    smem_desc_start_a: Int32, # If TS, then this is the tmem start address for A
    smem_desc_start_b: Int32,
    idesc: int,
    smem_desc_base_a: Optional[int],
    smem_desc_base_b: int,
    tCrA_layout: cute.Layout,
    tCrB_layout: cute.Layout,
    mbar_ptr: Optional[cutlass.Pointer] = None,
    mbar_phase: Optional[Int32] = None,
    zero_init: bool | Boolean = False,
    cta_group: int = 1,
) -> None:
    """Issue a full K-tile sweep of tcgen05.mma instructions from precomputed descriptors.

    The caller supplies the instruction descriptor (``idesc``) and the
    base/start values of the smem matrix descriptors, so no layout/swizzle
    analysis is done here.  Two modes, selected by whether
    ``smem_desc_base_a`` is None:

    - SS mode (``smem_desc_base_a`` is not None): both A and B are addressed
      through 64-bit smem descriptors whose low word is advanced by absolute
      per-k-tile offsets from the start value.
    - TS mode (``smem_desc_base_a`` is None): A is read from tmem
      (``smem_desc_start_a`` is then A's tmem start address) and only B uses
      an smem descriptor.  If ``mbar_ptr`` is given, the PTX spin-waits on
      the mbarrier (phase ``mbar_phase``) after issuing the first 3/4 of the
      k-tiles, then issues the rest, overlapping the wait with in-flight MMAs.

    Only the elected leader thread issues the MMAs.  The first MMA honors
    ``zero_init`` (overwrite vs accumulate); all later k-tiles accumulate.
    """
    # acc_tmem_addr += acc_offset
    is_ts = const_expr(smem_desc_base_a is None)
    num_k_tile = cute.size(tCrA_layout.shape[2])
    if const_expr(not is_ts):
        smem_desc_base_a_lo, smem_desc_a_hi = i64_to_i32x2(smem_desc_base_a)
    else:
        smem_desc_base_a_lo, smem_desc_a_hi = None, None
    smem_desc_base_b_lo, smem_desc_b_hi = i64_to_i32x2(smem_desc_base_b)
    # In TS mode the A layout is reinterpreted in 32-bit tmem-column units.
    tCrA_layout = (
        tCrA_layout
        if const_expr(not is_ts)
        # else cute.recast_layout(32, tCrA.element_type.width, tCrA_layout)
        # currently hard-coding the width to 16
        else cute.recast_layout(32, 16, tCrA_layout)
    )
    # Absolute per-k-tile offsets (and consecutive diffs) for both operands.
    offset_a = [cute.crd2idx((0, 0, k), tCrA_layout) for k in range(num_k_tile)]
    offset_a_diff = [offset_a[k] - offset_a[k - 1] for k in range(1, num_k_tile)]
    offset_b = [cute.crd2idx((0, 0, k), tCrB_layout) for k in range(num_k_tile)]
    offset_b_diff = [offset_b[k] - offset_b[k - 1] for k in range(1, num_k_tile)]
    smem_desc_start_a_lo = None
    if const_expr(not is_ts):
        # Low word of the descriptor: base bits OR'd with the start address bits.
        smem_desc_start_a_lo = Int32(smem_desc_base_a_lo | smem_desc_start_a)
        # smem_desc_start_a_lo = smem_desc_start_a
    smem_desc_start_b_lo = Int32(smem_desc_base_b_lo | smem_desc_start_b)
    # "p": predicate computed at runtime from zero_init; "0"/"1": constant.
    pred_str = "p" if isinstance(zero_init, Boolean) else "0" if zero_init else "1"
    if const_expr(not is_ts):
        assert mbar_ptr is None, "mbar_ptr must be None when a_src is not TMEM"
        llvm.inline_asm(
            None,
            [
                # acc.iterator.toint().ir_value(),
                # $0..$3: A desc lo, B desc lo, accumulate flag, accumulator tmem addr.
                Int32(cute.arch.make_warp_uniform(smem_desc_start_a_lo)).ir_value(),
                Int32(cute.arch.make_warp_uniform(smem_desc_start_b_lo)).ir_value(),
                Int32(not zero_init).ir_value(),
                Int32(cute.arch.make_warp_uniform(acc_tmem_addr)).ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 smem_desc_a_lo_start, smem_desc_b_lo_start;\n\t"
            ".reg .b32 smem_desc_a_lo, smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_a_hi, smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_a, smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            # f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            f"mov.b32 tmem_acc, $3;\n\t"
            "mov.b32 smem_desc_a_lo_start, $0;\n\t"
            "mov.b32 smem_desc_b_lo_start, $1;\n\t"
            f"mov.b32 smem_desc_a_hi, {hex(smem_desc_a_hi)};\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_a, {{smem_desc_a_lo_start, smem_desc_a_hi}};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo_start, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $2, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], smem_desc_a, smem_desc_b, idesc, {pred_str};\n\t"
            + "".join(
                (
                    # f"add.u32 smem_desc_a_lo, smem_desc_a_lo, {hex(offset_a_diff[k - 1])};\n\t"
                    # f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"add.s32 smem_desc_a_lo, smem_desc_a_lo_start, {hex(offset_a[k])};\n\t"
                    f"add.s32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                    f"mov.b64 smem_desc_a, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], smem_desc_a, smem_desc_b, idesc, 1;\n\t"
                )
                for k in range(1, num_k_tile)
            )
            + "}\n",
            # "r,r,r",
            "r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    else:
        input_args = [
            # $0..$3: A tmem addr, B desc lo, accumulate flag, accumulator tmem addr.
            Int32(cute.arch.make_warp_uniform(smem_desc_start_a)).ir_value(),
            Int32(cute.arch.make_warp_uniform(smem_desc_start_b_lo)).ir_value(),
            Int32(not zero_init).ir_value(),
            Int32(cute.arch.make_warp_uniform(acc_tmem_addr)).ir_value(),
        ]
        if const_expr(mbar_ptr is not None):
            assert mbar_phase is not None, "mbar_phase must be provided when mbar_ptr is not None"
            # $4, $5: mbarrier pointer and expected parity phase.
            input_args.append(mbar_ptr.toint().ir_value())
            input_args.append(Int32(mbar_phase).ir_value())
            # Spin-wait on the mbarrier parity phase; spliced mid-sweep below.
            mbar_wait_str = (
                ".reg .pred P1; \n\t"
                "LAB_WAIT: \n\t"
                "mbarrier.try_wait.parity.shared::cta.b64 P1, [$4], $5, 10000000; \n\t"
                "@P1 bra DONE; \n\t"
                "bra LAB_WAIT; \n\t"
                "DONE: \n\t"
            )
        else:
            mbar_wait_str = ""
        llvm.inline_asm(
            None,
            # [
            #     # acc.iterator.toint().ir_value(),
            #     Int32(tCrA_layout[None, None, 0].iterator.toint()).ir_value(),
            #     Int32(smem_desc_start_b_lo).ir_value(),
            #     Int32(not zero_init).ir_value(),
            # ],
            input_args,
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 tmem_a;\n\t"
            ".reg .b32 smem_desc_b_lo_start;\n\t"
            ".reg .b32 smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_b_hi;\n\t"
            ".reg .b64 smem_desc_b;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            f"mov.b32 idesc, {hex(idesc)};\n\t"
            # f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            f"mov.b32 tmem_acc, $3;\n\t"
            f"mov.b32 tmem_a, $0;\n\t"
            f"mov.b32 smem_desc_b_lo_start, $1;\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            f"mov.b64 smem_desc_b, {{smem_desc_b_lo_start, smem_desc_b_hi}};\n\t"
            "setp.ne.b32 p, $2, 0;\n\t"
            f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], [tmem_a], smem_desc_b, idesc, {pred_str};\n\t"
            + "".join(
                (
                    # f"add.u32 tmem_a, tmem_a, {hex(offset_a_diff[k - 1])};\n\t"
                    # f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                    f"add.u32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                    f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    # f"@leader_thread tcgen05.mma.cta_group::1.kind::f16 [tmem_acc], [tmem_a], smem_desc_b, idesc, 1;\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], [tmem_a + {hex(offset_a[k])}], smem_desc_b, idesc, 1;\n\t"
                )
                # Issue all k-tiles, or only the first 3/4 before the mbar wait.
                for k in range(
                    1,
                    num_k_tile if const_expr(mbar_ptr is None) else num_k_tile // 4 * 3,
                )
            )
            + mbar_wait_str
            + (
                # Remaining quarter of the k-tiles, issued after the mbar wait.
                "".join(
                    (
                        # f"add.u32 smem_desc_b_lo, smem_desc_b_lo, {hex(offset_b_diff[k - 1])};\n\t"
                        f"add.u32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                        f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                        f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], [tmem_a + {hex(offset_a[k])}], smem_desc_b, idesc, 1;\n\t"
                    )
                    for k in range(num_k_tile // 4 * 3, num_k_tile)
                )
                if const_expr(mbar_ptr is not None)
                else ""
            )
            + "}\n",
            "r,r,r,r" if const_expr(mbar_ptr is None) else "r,r,r,r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
@cute.jit
def declare_ptx_smem_desc(
    smem_desc_start_a: Int32, # If TS, then this is the tmem start address for A
    smem_desc_base_a: Optional[int],
    tCrA_layout: cute.Layout,
    var_name_prefix: str = "smem_desc",
) -> None:
    """Declare per-k-tile 64-bit smem-descriptor registers in PTX.

    Emits inline PTX that declares ``{var_name_prefix}_0 ..
    {var_name_prefix}_{num_k_tile-1}`` (.b64 registers), one per k-tile.
    Each holds the smem matrix descriptor for that k-tile: the start
    descriptor's low word plus the k-tile's layout offset, paired with the
    constant high word.  The named registers are meant to be referenced by a
    later inline-asm block (see gemm_ptx_precomputed_varname, which consumes
    them by name).

    In TS mode (``smem_desc_base_a`` is None) no PTX is emitted.
    """
    is_ts = const_expr(smem_desc_base_a is None)
    num_k_tile = cute.size(tCrA_layout.shape[2])
    smem_desc_base_a_lo, smem_desc_a_hi = None, None
    if const_expr(not is_ts):
        smem_desc_base_a_lo, smem_desc_a_hi = i64_to_i32x2(smem_desc_base_a)
    # In TS mode the layout is reinterpreted in 32-bit tmem-column units.
    tCrA_layout = (
        tCrA_layout
        if const_expr(not is_ts)
        # else cute.recast_layout(32, tCrA.element_type.width, tCrA_layout)
        # currently hard-coding the width to 16
        else cute.recast_layout(32, 16, tCrA_layout)
    )
    offset_a = [cute.crd2idx((0, 0, k), tCrA_layout) for k in range(num_k_tile)]
    smem_desc_start_a_lo = None
    if const_expr(not is_ts):
        # Low word of the descriptor: base bits OR'd with the start address bits.
        smem_desc_start_a_lo = Int32(smem_desc_base_a_lo | smem_desc_start_a)
    if const_expr(not is_ts):
        llvm.inline_asm(
            None,
            [Int32(cute.arch.make_warp_uniform(smem_desc_start_a_lo)).ir_value()],
            f".reg .b32 {var_name_prefix}_lo;\n\t"
            f".reg .b64 {var_name_prefix}_<{num_k_tile}>;\n\t"
            f"mov.b64 {var_name_prefix}_0, {{$0, {hex(smem_desc_a_hi)}}};\n\t"
            + "".join(
                (
                    f"add.s32 {var_name_prefix}_lo, $0, {hex(offset_a[k])};\n\t"
                    f"mov.b64 {var_name_prefix}_{k}, {{{var_name_prefix}_lo, {hex(smem_desc_a_hi)}}};\n\t"
                )
                for k in range(1, num_k_tile)
            ),
            "r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
@cute.jit
def declare_ptx_idesc(op: cute.nvgpu.tcgen05.mma.MmaOp, var_name: str = "idesc") -> None:
    """Declare a .b32 PTX register `var_name` preloaded with the instruction descriptor for `op`.

    The register is referenced by name from later inline-asm blocks
    (e.g. gemm_ptx_precomputed_varname's `idesc_var_name`).
    """
    idesc_val = const_expr(sm100_desc.mma_op_to_idesc(op))
    asm_str = (
        f".reg .b32 {var_name};\n\t"  # noqa
        f"mov.b32 {var_name}, {hex(idesc_val)};\n\t"
    )
    llvm.inline_asm(
        None,
        [],
        asm_str,
        constraints="",
        has_side_effects=True,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
@cute.jit
def gemm_ptx_precomputed_varname(
    acc_tmem_addr: Int32,
    smem_desc_start_b: Int32,
    # idesc: int,
    smem_desc_base_b: int,
    tCrB_layout: cute.Layout,
    smem_var_name_prefix: str,
    idesc_var_name: str,
    smem_offset: int,
    zero_init: bool | Boolean = False,
    cta_group: int = 1,
) -> None:
    """K-tile MMA sweep reusing named PTX registers declared by earlier asm blocks.

    The A-operand descriptors are not passed as operands: they live in the
    .b64 registers ``{smem_var_name_prefix}_k`` previously declared by
    declare_ptx_smem_desc, and the instruction descriptor lives in the
    register named ``idesc_var_name`` declared by declare_ptx_idesc.  Each A
    descriptor's low word is bumped in place by ``smem_offset`` before use,
    so successive calls advance A through smem stages.

    B descriptors are built here from ``smem_desc_start_b`` plus precomputed
    per-k-tile offsets.  Only the elected leader thread issues the MMAs; the
    first one honors ``zero_init``, the rest always accumulate.
    """
    is_ts = False
    num_k_tile = cute.size(tCrB_layout.shape[2])
    smem_desc_base_b_lo, smem_desc_b_hi = i64_to_i32x2(smem_desc_base_b)
    offset_b = [cute.crd2idx((0, 0, k), tCrB_layout) for k in range(num_k_tile)]
    # Low word of the B descriptor: base bits OR'd with the start address bits.
    smem_desc_start_b_lo = Int32(smem_desc_base_b_lo | smem_desc_start_b)
    # "p": predicate computed at runtime from zero_init; "0"/"1": constant.
    pred_str = "p" if isinstance(zero_init, Boolean) else "0" if zero_init else "1"
    if const_expr(not is_ts):  # always True here; no TS variant in this function
        llvm.inline_asm(
            None,
            [
                # $0..$2: B desc lo, accumulate flag, accumulator tmem addr.
                Int32(cute.arch.make_warp_uniform(smem_desc_start_b_lo)).ir_value(),
                Int32(not zero_init).ir_value(),
                Int32(cute.arch.make_warp_uniform(acc_tmem_addr)).ir_value(),
            ],
            "{\n\t"
            ".reg .pred leader_thread;\n\t"
            ".reg .pred p;\n\t"
            # ".reg .b32 idesc;\n\t"
            ".reg .b32 tmem_acc;\n\t"
            ".reg .b32 smem_desc_b_lo_start;\n\t"
            ".reg .b32 smem_desc_a_lo, smem_desc_b_lo;\n\t"
            ".reg .b32 smem_desc_a_hi, smem_desc_b_hi;\n\t"
            # ".reg .b64 smem_desc_b;\n\t"
            f".reg .b64 smem_desc_b_<{num_k_tile}>;\n\t"
            "elect.sync _|leader_thread, -1;\n\t"
            # f"mov.b32 idesc, {hex(idesc)};\n\t"
            # f"mov.b32 tmem_acc, {hex(acc_tmem_addr)};\n\t"
            f"mov.b32 tmem_acc, $2;\n\t"
            "mov.b32 smem_desc_b_lo_start, $0;\n\t"
            f"mov.b32 smem_desc_b_hi, {hex(smem_desc_b_hi)};\n\t"
            # Advance A's k-tile-0 descriptor in place by smem_offset.
            f"mov.b64 {{smem_desc_a_lo, smem_desc_a_hi}}, {smem_var_name_prefix}_0;\n\t"
            f"add.s32 smem_desc_a_lo, smem_desc_a_lo, {smem_offset};\n\t"
            f"mov.b64 {smem_var_name_prefix}_0, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
            f"mov.b64 smem_desc_b_0, {{smem_desc_b_lo_start, smem_desc_b_hi}};\n\t"
            + "".join(
                (
                    # Same in-place bump for the remaining A k-tiles, and build B's descriptors.
                    f"mov.b64 {{smem_desc_a_lo, smem_desc_a_hi}}, {smem_var_name_prefix}_{k};\n\t"
                    f"add.s32 smem_desc_a_lo, smem_desc_a_lo, {smem_offset};\n\t"
                    f"add.s32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                    f"mov.b64 {smem_var_name_prefix}_{k}, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
                    f"mov.b64 smem_desc_b_{k}, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                )
                for k in range(1, num_k_tile)
            )
            + "setp.ne.b32 p, $1, 0;\n\t"
            # f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], {smem_var_name_prefix}_0, smem_desc_b, idesc, {pred_str};\n\t"
            f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], {smem_var_name_prefix}_0, smem_desc_b_0, {idesc_var_name}, {pred_str};\n\t"
            + "".join(
                (
                    # f"mov.b64 {{smem_desc_a_lo, smem_desc_a_hi}}, {smem_var_name_prefix}_{k};\n\t"
                    # f"add.s32 smem_desc_a_lo, smem_desc_a_lo, {smem_offset};\n\t"
                    # f"add.s32 smem_desc_b_lo, smem_desc_b_lo_start, {hex(offset_b[k])};\n\t"
                    # f"mov.b64 {smem_var_name_prefix}_{k}, {{smem_desc_a_lo, smem_desc_a_hi}};\n\t"
                    # f"mov.b64 smem_desc_b, {{smem_desc_b_lo, smem_desc_b_hi}};\n\t"
                    # f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], {smem_var_name_prefix}_{k}, smem_desc_b, idesc, 1;\n\t"
                    # f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], {smem_var_name_prefix}_{k}, smem_desc_b, {idesc_var_name}, 1;\n\t"
                    f"@leader_thread tcgen05.mma.cta_group::{cta_group}.kind::f16 [tmem_acc], {smem_var_name_prefix}_{k}, smem_desc_b_{k}, {idesc_var_name}, 1;\n\t"
                )
                for k in range(1, num_k_tile)
            )
            + "}\n",
            "r,r,r",
            has_side_effects=True,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/blackwell_helpers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1052,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/flash_fwd_sm100.py | # Supported features:
# - BF16 & FP16 dtype
# - noncausal & causal attention
# - MHA, GQA, MQA
# - hdim 64, 96, 128, (192, 128).
# - varlen
# - sliding window
# - split-kv
# Unsupported features that will be added later:
# - page size != 128
# - more hdim (192, 256)
# Based on the cutlass example and cute-dsl example:
# https://github.com/NVIDIA/cutlass/tree/main/examples/77_blackwell_fmha
# https://github.com/NVIDIA/cutlass/blob/main/examples/python/CuTeDSL/blackwell/fmha.py
import enum
import math
from typing import Type, Tuple, Callable, Optional, Literal
from functools import partial
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
from cutlass import Float32, Int32, Int64, Boolean, const_expr
from cutlass.cute.nvgpu import cpasync
import cutlass.cute.nvgpu.tcgen05 as tcgen05
import cutlass.utils.blackwell_helpers as sm100_utils_basic
from cutlass import pipeline
from cutlass.pipeline import pipeline_init_arrive, pipeline_init_wait
from cutlass.base_dsl.arch import Arch
from cutlass.cutlass_dsl import BaseDSL
from quack import copy_utils, layout_utils
from flash_attn.cute.paged_kv import PagedKVManager
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
import flash_attn.cute.pipeline as pipeline_custom
from flash_attn.cute.mask import AttentionMask
from flash_attn.cute.softmax import SoftmaxSm100, apply_score_mod_inner
from flash_attn.cute.seqlen_info import SeqlenInfoQK
from flash_attn.cute.block_info import BlockInfo
from flash_attn.cute.block_sparsity import BlockSparseTensors
from flash_attn.cute.block_sparse_utils import (
get_total_block_count,
produce_block_sparse_loads_sm100,
softmax_block_sparse_sm100,
handle_block_sparse_empty_tile_correction_sm100,
)
from flash_attn.cute.pack_gqa import PackGQA
from flash_attn.cute import mma_sm100_desc as sm100_desc
from flash_attn.cute import blackwell_helpers as sm100_utils
from cutlass.cute import FastDivmodDivisor
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.tile_scheduler import (
TileSchedulerArguments,
SingleTileScheduler,
StaticPersistentTileScheduler,
SingleTileLPTScheduler,
SingleTileVarlenScheduler,
)
class NamedBarrierFwd(enum.IntEnum):
    """Named-barrier IDs used to synchronize warp groups in the forward kernel.

    Values start at 1 because barrier 0 is reserved for sync_threads().
    One SoftmaxStats barrier per warp (W0..W7).
    """
    Epilogue = enum.auto() # starts from 1 as barrier 0 is reserved for sync_threads()
    TmemPtr = enum.auto()
    SoftmaxStatsW0 = enum.auto()
    SoftmaxStatsW1 = enum.auto()
    SoftmaxStatsW2 = enum.auto()
    SoftmaxStatsW3 = enum.auto()
    SoftmaxStatsW4 = enum.auto()
    SoftmaxStatsW5 = enum.auto()
    SoftmaxStatsW6 = enum.auto()
    SoftmaxStatsW7 = enum.auto()
    # WarpSchedulerWG1 = enum.auto()
    # WarpSchedulerWG2 = enum.auto()
class FlashAttentionForwardSm100:
    def __init__(
        self,
        # dtype: Type[cutlass.Numeric],
        head_dim: int,
        head_dim_v: Optional[int] = None,
        qhead_per_kvhead: cutlass.Constexpr[int] = 1,
        is_causal: bool = False,
        is_local: bool = False,
        is_split_kv: bool = False,
        pack_gqa: bool = False,
        q_subtile_factor: int | None = None,
        m_block_size: int = 128,
        n_block_size: int = 128,
        q_stage: cutlass.Constexpr[int] = 2,
        is_persistent: bool = True,
        score_mod: cutlass.Constexpr | None = None,
        mask_mod: cutlass.Constexpr | None = None,
        has_aux_tensors: cutlass.Constexpr = False,
        paged_kv_non_tma: bool = False,
        is_varlen_q: bool = False,
        use_2cta_instrs: bool = False,
    ):
        """Configure the SM100 forward attention kernel.

        Derives compile-time configuration from the requested head dims and
        feature flags: padded head dims, CTA/MMA tile shapes, cluster shape,
        warp-role assignment, tmem column layout, and per-role register
        budgets.  Dtypes are taken later from the tensors passed to
        ``__call__``.

        :param head_dim: QK head dimension (padded up to a multiple of 16).
        :param head_dim_v: V/O head dimension; defaults to ``head_dim``.
        :param qhead_per_kvhead: GQA/MQA ratio of Q heads per KV head.
        :param is_causal: causal masking.
        :param is_local: sliding-window (local) masking.
        :param is_split_kv: split-KV decoding path (not supported for
            head_dim_v >= 192).
        :param pack_gqa: pack Q heads of one KV head into the M tile;
            requires m_block_size divisible by qhead_per_kvhead.
        :param q_stage: number of Q tiles pipelined per CTA (1 or 2).
        :param is_persistent: use a persistent tile scheduler (disabled when
            sO/sQ smem is overlapped).
        :param paged_kv_non_tma: load paged KV with cp.async instead of TMA.
        :param use_2cta_instrs: use 2-CTA tcgen05 MMA instructions
            (cluster shape (2, 1)).
        """
        self.use_tma_KV = not paged_kv_non_tma
        # self.dtype = dtype
        # padding head_dim to a multiple of 16 as k_block_size
        hdim_multiple_of = 16
        self.head_dim_padded = int(math.ceil(head_dim / hdim_multiple_of) * hdim_multiple_of)
        head_dim_v = head_dim_v if head_dim_v is not None else head_dim
        self.same_hdim_kv = head_dim == head_dim_v
        self.head_dim_v_padded = int(math.ceil(head_dim_v / hdim_multiple_of) * hdim_multiple_of)
        self.same_hdim_kv_padded = self.head_dim_padded == self.head_dim_v_padded
        # Whether out-of-bounds checks along the head dim are needed.
        self.check_hdim_oob = head_dim != self.head_dim_padded
        self.check_hdim_v_oob = head_dim_v != self.head_dim_v_padded
        self.m_block_size = m_block_size
        self.n_block_size = n_block_size
        self.q_stage = q_stage
        assert self.q_stage in [1, 2]
        self.use_2cta_instrs = use_2cta_instrs
        # If split_P_arrive, the softmax warps write some columns of P first, signal to the MMA warp
        # to being the P @ V MMA, then write the rest of P and signal again. This allows some overlap
        # between compute the last couple columns of P and the P @ V MMA.
        self.split_P_arrive = n_block_size // 4 * 3
        self.split_P_arrive = int(self.split_P_arrive / 32) * 32 # multiple of 32
        assert self.split_P_arrive % 32 == 0
        assert self.split_P_arrive < self.n_block_size
        self.arch = BaseDSL._get_dsl().get_arch_enum()
        assert self.arch >= Arch.sm_100 and self.arch <= Arch.sm_110f, "Only SM 10.x and 11.x are supported"
        self.cta_group_size = 2 if self.use_2cta_instrs else 1
        # cta_tiler M includes only 1 CTA, the scheduler will take into account the cluster shape
        self.cta_tiler = (self.q_stage * m_block_size, n_block_size, self.head_dim_padded)
        # With 2CTA, the MMA tiler M covers both CTAs, so it's cta_group_size * m_block_size.
        # Each CTA owns m_block_size rows; the 2CTA MMA instruction spans both.
        self.mma_tiler_qk = (self.cta_group_size * m_block_size, n_block_size, self.head_dim_padded)
        self.mma_tiler_pv = (self.cta_group_size * m_block_size, self.head_dim_v_padded, n_block_size)
        self.qk_acc_dtype = Float32
        self.pv_acc_dtype = Float32
        self.cluster_shape_mn = (2, 1) if self.use_2cta_instrs else (1, 1)
        self.is_persistent = is_persistent
        self.is_causal = is_causal
        self.is_local = is_local
        self.is_varlen_q = is_varlen_q
        self.use_correction_warps_for_epi = is_varlen_q
        self.qhead_per_kvhead = qhead_per_kvhead
        self.is_split_kv = is_split_kv
        self.pack_gqa = pack_gqa
        self.q_subtile_factor = q_subtile_factor
        if pack_gqa:
            assert m_block_size % self.qhead_per_kvhead == 0, (
                "For PackGQA, m_block_size must be divisible by qhead_per_kvhead"
            )
        assert not (self.is_split_kv and self.head_dim_v_padded >= 192), (
            "SplitKV is not supported for hdim >= 192"
        )
        self.score_mod = score_mod
        self.mask_mod = mask_mod
        # Vectorization width for score_mod; may be overridden by the mod's
        # __vec_size__ attribute.
        self.vec_size: cutlass.Constexpr = getattr(
            score_mod, "__vec_size__", 1 if cutlass.const_expr(has_aux_tensors) else 2
        )
        # Does S1 need to wait for S0 to finish
        # self.s0_s1_barrier = self.head_dim_padded in [64, 96] and (not self.is_causal and not self.is_local)
        is_sm103 = self.arch >= Arch.sm_103 and self.arch <= Arch.sm_103f
        # self.enable_ex2_emu = self.head_dim_padded <= 128 and not is_sm103
        self.enable_ex2_emu = (self.head_dim_padded <= 128 or (self.head_dim_padded == 192 and self.use_2cta_instrs and not self.is_causal and not self.is_local)) and not is_sm103
        self.s0_s1_barrier = False
        # Reuse the same smem for sO and sQ when both won't fit at once.
        self.overlap_sO_sQ = (
            (self.head_dim_padded == 192 and self.head_dim_v_padded >= 64) or
            (self.head_dim_v_padded >= 128 and self.is_split_kv)
        )
        if self.overlap_sO_sQ:
            self.is_persistent = False
        assert self.use_tma_KV or not (self.check_hdim_oob or self.check_hdim_v_oob), (
            "Paged KV does not support irregular head dim"
        )
        # Warp-role assignment within the CTA (16 warps total).
        self.softmax0_warp_ids = (0, 1, 2, 3)
        self.softmax1_warp_ids = (4, 5, 6, 7)
        self.correction_warp_ids = (8, 9, 10, 11)
        self.mma_warp_id = 12
        self.epilogue_warp_ids = (13,)
        self.load_warp_ids = (14,)
        self.empty_warp_ids = (15,)
        self.tmem_alloc_cols = cute.arch.get_max_tmem_alloc_cols("sm_100")
        self.threads_per_cta = cute.arch.WARP_SIZE * len(
            (
                *self.softmax0_warp_ids,
                *self.softmax1_warp_ids,
                *self.correction_warp_ids,
                self.mma_warp_id,
                *self.load_warp_ids,
                *self.epilogue_warp_ids,
                *self.empty_warp_ids,
            )
        )
        # Reshuffle warp roles for single-Q-stage and non-TMA-KV variants.
        if self.q_stage == 1:
            if not self.use_tma_KV:
                self.empty_warp_ids = self.empty_warp_ids + self.load_warp_ids
                self.load_warp_ids = self.softmax1_warp_ids
            else:
                self.empty_warp_ids = self.empty_warp_ids + self.softmax1_warp_ids
                self.softmax1_warp_ids = ()
        elif not self.use_tma_KV:
            self.load_warp_ids = (14, 15)
            self.empty_warp_ids = ()
        if self.use_correction_warps_for_epi:
            self.empty_warp_ids = self.empty_warp_ids + self.epilogue_warp_ids
            self.epilogue_warp_ids = self.correction_warp_ids
        elif self.is_varlen_q: # fallback
            self.epilogue_warp_ids = (13, 14)
        # tmem column layout: S buffers first, then O buffers.
        self.tmem_s_offset = [0, self.n_block_size] # e.g., 0, 128
        self.tmem_o_offset = [
            self.tmem_s_offset[-1] + self.n_block_size + i * self.head_dim_v_padded
            for i in range(self.q_stage)
        ] # e.g., 256, 384
        self.tmem_total = self.tmem_o_offset[-1] + self.head_dim_v_padded
        assert self.tmem_total <= self.tmem_alloc_cols
        # P overlays the second half of each S buffer.
        self.tmem_s_to_p_offset = self.n_block_size // 2
        self.tmem_p_offset = [
            self.tmem_s_offset[i] + self.tmem_s_to_p_offset for i in range(2)
        ] # 0, 128
        # vec buffer for row_max & row_sum
        self.tmem_vec_offset = self.tmem_s_offset
        # Per-role register budgets (tuned per head dim / feature combination).
        if self.head_dim_padded < 96:
            self.num_regs_softmax = 200 if not paged_kv_non_tma else 184
            self.num_regs_correction = 64
            self.num_regs_other = 48 if not paged_kv_non_tma else 80
        else:
            # self.num_regs_softmax = 192 if self.is_causal or self.is_local else 184
            if not self.enable_ex2_emu:
                self.num_regs_softmax = 192 if not paged_kv_non_tma else 184
            else:
                # self.num_regs_softmax = 200 if not paged_kv_non_tma else 184
                self.num_regs_softmax = 192 if not paged_kv_non_tma else 184
                # self.num_regs_softmax = 176
            # self.num_regs_correction = 96
            # self.num_regs_correction = 64 if self.is_causal or self.is_local else 80
            if not self.enable_ex2_emu:
                self.num_regs_correction = 80 if not paged_kv_non_tma else 64
            else:
                # self.num_regs_correction = 64
                self.num_regs_correction = 80 if not paged_kv_non_tma else 64
            # self.num_regs_other = 32
            # self.num_regs_other = 64
            # self.num_regs_other = 80
            self.num_regs_other = 48 if not paged_kv_non_tma else 80
            # self.num_regs_other = 96 if self.is_causal or self.is_local else 80
            # self.num_regs_other = 64 if self.is_causal or self.is_local else 80
        self.buffer_align_bytes = 1024
def _setup_attributes(self):
"""Set up configurations and parameters for the FMHA kernel operation.
This method initializes and configures various attributes required for the
execution of the fused multi-head attention kernel, mainly about the pipeline stages:
- Sets up staging parameters for Q, K, V inputs and accumulator data
- Configures pipeline stages for softmax, correction, and epilogue operations
"""
smem_size_q = self.q_stage * self.m_block_size * self.head_dim_padded * self.q_dtype.width // 8
smem_size_o = self.q_stage * self.m_block_size * self.head_dim_v_padded * self.o_dtype.width // 8
smem_size_q_o = smem_size_q + smem_size_o if not self.overlap_sO_sQ else max(smem_size_q, smem_size_o)
smem_size_k_per_stage = self.n_block_size * self.head_dim_padded * self.k_dtype.width // 8
smem_size_v_per_stage = self.n_block_size * self.head_dim_v_padded * self.v_dtype.width // 8
smem_size_kv_per_stage = max(smem_size_k_per_stage, smem_size_v_per_stage) // self.cta_group_size
kv_stage = (224 * 1024 - smem_size_q_o) // smem_size_kv_per_stage
if self.head_dim_padded == 192 and self.head_dim_v_padded == 128 and kv_stage == 2:
# For hdim 192,128, we can fit 3 stages if we use uneven_kv_smem
kv_stage = 3
self.kv_stage = kv_stage
# print("kv_stage", self.kv_stage)
self.s_stage = 2
assert self.s_stage >= self.q_stage
# For hdim 192,128 1CTA, we don't have enough smem to store all 3 stages of KV:
# 128 x 192 x 2 bytes x 3 stages = 144KB, and we need 96KB for Q.
# Instead we store smem as [smem_large, smem_small, smem_large], where smem_large is
# 128 x 192 and smem_small is 128 x 128. We set the stride between the stages to be
# 128 * 160, so that indexing the 0th and 2nd stages will get the right address,
# but for the 1st stage we need to add or subtract (depending on phase) 128 x 64.
self.uneven_kv_smem = (
self.head_dim_padded == 192 and self.head_dim_v_padded == 128 and self.kv_stage == 3
)
self.uneven_kv_smem_offset = (
self.m_block_size * (self.head_dim_padded - self.head_dim_v_padded) // 2
if self.uneven_kv_smem
else 0
)
assert self.uneven_kv_smem_offset % 1024 == 0
@cute.jit
def __call__(
    self,
    mQ: cute.Tensor,  # (b, s_q, h, d) or (total_q, h, d) if there is cu_seqlens_q
    mK: cute.Tensor,  # (b_k, s_k, h_k, d) or (total_k, h_k, d) if there is cu_seqlens_k or (num_pages, page_size, h_k, d) if there is page_table
    mV: cute.Tensor,  # (b_k, s_k, h_k, dv) or (total_k, h_k, dv) if there is cu_seqlens_k or (num_pages, page_size, h_k, dv) if there is page_table
    mO: cute.Tensor,  # (b, s_q, h, dv) or (total_q, h, dv) if there is cu_seqlens_q
    mLSE: Optional[cute.Tensor],
    softmax_scale: Float32,
    stream: cuda.CUstream,
    mCuSeqlensQ: Optional[cute.Tensor] = None,
    mCuSeqlensK: Optional[cute.Tensor] = None,
    mSeqUsedQ: Optional[cute.Tensor] = None,
    mSeqUsedK: Optional[cute.Tensor] = None,
    mPageTable: Optional[cute.Tensor] = None,  # (b_k, max_num_pages_per_seq)
    window_size_left: Int32 | int | None = None,
    window_size_right: Int32 | int | None = None,
    learnable_sink: Optional[cute.Tensor] = None,
    blocksparse_tensors: Optional[BlockSparseTensors] = None,
    aux_tensors: Optional[list] = None,
):
    """Execute the Fused Multi-Head Attention operation on the provided tensors.

    This method prepares the input tensors for processing, validates their shapes and types,
    configures the computation parameters, and launches the CUDA kernel.

    The method handles:
    1. Tensor layout transformations for specific memory access patterns
    2. Validation of tensor shapes and data types
    3. Initialization of hardware-specific parameters and memory layouts
    4. Configuration of TMA (Tensor Memory Access) operations
    5. Grid and work scheduling computation
    6. Kernel launch with appropriate parameters

    Raises:
        TypeError: if Q, K and V do not all share the same element type.
    """
    # setup static attributes before smem/grid/tma computation
    self.q_dtype = mQ.element_type
    self.k_dtype = mK.element_type
    self.v_dtype = mV.element_type
    self.o_dtype = mO.element_type
    mQ, mK, mV, mO = [assume_tensor_aligned(t) for t in (mQ, mK, mV, mO)]
    # Reorder the mode indices so the kernel sees (s_q, d, h, b); the varlen
    # (cu_seqlens) layouts have one fewer mode, hence the 3-element permutation.
    Q_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensQ is None) else [0, 2, 1]
    mQ = cute.make_tensor(mQ.iterator, cute.select(mQ.layout, mode=Q_layout_transpose))
    # (s_k, d, h_k, b_k) or (total_k, d, h_k) if there's cu_seqlens_k or (page_size, d, h_k, num_pages) if there's page_table
    KV_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensK is None) else [0, 2, 1]
    mK, mV = [
        cute.make_tensor(t.iterator, cute.select(t.layout, mode=KV_layout_transpose))
        for t in (mK, mV)
    ]
    # Split-KV outputs carry an extra leading "split" mode, so O/LSE use longer
    # permutations and num_splits is read off the output tensor itself.
    if const_expr(self.is_split_kv):
        O_layout_transpose = [2, 4, 3, 1, 0] if const_expr(mCuSeqlensQ is None) else [1, 3, 2, 0]
        LSE_layout_transpose = [3, 2, 1, 0] if const_expr(mCuSeqlensQ is None) else [2, 1, 0]
        num_splits = mO.shape[0]
    else:
        O_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensQ is None) else [0, 2, 1]
        LSE_layout_transpose = [2, 1, 0] if const_expr(mCuSeqlensQ is None) else [1, 0]
        num_splits = Int32(1)
    mO = cute.make_tensor(mO.iterator, cute.select(mO.layout, mode=O_layout_transpose))
    mLSE = (
        cute.make_tensor(mLSE.iterator, cute.select(mLSE.layout, mode=LSE_layout_transpose))
        if const_expr(mLSE is not None)
        else None
    )
    # (s, d, h, b) -> (d, s, h, b)
    V_layout_transpose = [1, 0, 2, 3] if const_expr(mCuSeqlensK is None) else [1, 0, 2]
    mV = cute.make_tensor(mV.iterator, cute.select(mV.layout, mode=V_layout_transpose))
    # check type consistency
    if const_expr(self.q_dtype != self.k_dtype):
        raise TypeError(f"Type mismatch: {self.q_dtype} != {self.k_dtype}")
    if const_expr(self.q_dtype != self.v_dtype):
        raise TypeError(f"Type mismatch: {self.q_dtype} != {self.v_dtype}")
    self._setup_attributes()
    # TMA stores for O need static shapes, so varlen Q falls back to cp.async stores.
    self.use_tma_O = self.arch >= Arch.sm_90 and mCuSeqlensQ is None and mSeqUsedQ is None
    # This can be tuned
    # This is currently very ad-hoc, we should tune it systematically
    self.ex2_emu_freq = 0
    # self.ex2_emu_start_frg = 1 if self.is_causal else 0
    self.ex2_emu_start_frg = 1
    if const_expr(self.enable_ex2_emu):
        self.ex2_emu_freq = 16
        if const_expr(self.head_dim_padded == 128 and self.use_2cta_instrs):
            self.ex2_emu_freq = 12
        if const_expr(
            self.pack_gqa and self.head_dim_padded > 64 and not self.is_causal and not self.is_local
        ):
            self.ex2_emu_freq = 32 if mCuSeqlensQ is not None or mSeqUsedQ is not None else 10
        if const_expr(self.head_dim_padded > 64 and self.is_causal):
            self.ex2_emu_freq = 10
    cta_group = tcgen05.CtaGroup.TWO if self.use_2cta_instrs else tcgen05.CtaGroup.ONE
    # Operand major modes: Q and K are K-major for the Q@K^T gemm; V is MN-major
    # because mV was transposed to (d, s_k, ...) above.
    q_major_mode = tcgen05.OperandMajorMode.K
    k_major_mode = tcgen05.OperandMajorMode.K
    v_major_mode = tcgen05.OperandMajorMode.MN
    self.o_layout = cutlass.utils.LayoutEnum.from_tensor(mO)
    # the intermediate tensor p is from tmem & mK-major
    p_source = tcgen05.OperandSource.TMEM
    p_major_mode = tcgen05.OperandMajorMode.K
    tiled_mma_qk = sm100_utils_basic.make_trivial_tiled_mma(
        self.q_dtype,
        q_major_mode,
        k_major_mode,
        self.qk_acc_dtype,
        cta_group,
        self.mma_tiler_qk[:2],
    )
    tiled_mma_pv = sm100_utils_basic.make_trivial_tiled_mma(
        self.v_dtype,
        p_major_mode,
        v_major_mode,
        self.pv_acc_dtype,
        cta_group,
        self.mma_tiler_pv[:2],
        p_source,
    )
    self.cluster_shape_mnk = (*self.cluster_shape_mn, 1)
    cta_layout_vmnk = cute.tiled_divide(
        cute.make_layout(self.cluster_shape_mnk), (tiled_mma_qk.thr_id.shape,)
    )
    # epi_tile is per-CTA (not full 2CTA) since each CTA writes its own O portion
    self.epi_tile = (self.m_block_size, self.head_dim_v_padded)
    # Shared-memory layouts for the MMA operands / epilogue, staged by the
    # pipeline depths computed in _setup_attributes.
    sQ_layout = sm100_utils_basic.make_smem_layout_a(
        tiled_mma_qk, self.mma_tiler_qk, self.q_dtype, self.q_stage
    )
    sK_layout = sm100_utils_basic.make_smem_layout_b(
        tiled_mma_qk, self.mma_tiler_qk, self.k_dtype, self.kv_stage
    )
    tP_layout = sm100_utils_basic.make_smem_layout_a(
        tiled_mma_pv, self.mma_tiler_pv, self.q_dtype, self.s_stage
    )
    sV_layout = sm100_utils_basic.make_smem_layout_b(
        tiled_mma_pv, self.mma_tiler_pv, self.v_dtype, self.kv_stage
    )
    sO_layout = sm100_utils_basic.make_smem_layout_epi(
        self.o_dtype, self.o_layout, self.epi_tile, self.q_stage
    )
    if const_expr(not self.same_hdim_kv_padded):
        # sK and sV are using the same physical smem so we need to adjust the stride so that they line up
        stride_sK = const_expr(
            max(sK_layout.outer.stride[-1], 0)
        )  # take max to turn tuple to Int32
        stride_sV = const_expr(max(sV_layout.outer.stride[-1], 0))
        # uneven_kv_smem stores [large, small, large] stages, so the per-stage
        # stride is the average of the K and V stage strides (see _setup_attributes).
        stage_stride = const_expr(
            max(stride_sK, stride_sV)
            if not self.uneven_kv_smem
            else (stride_sK + stride_sV) // 2
        )
        sK_layout = cute.make_composed_layout(
            sK_layout.inner,
            0,
            cute.make_layout(
                (*sK_layout.outer.shape[:-1], self.kv_stage),
                stride=(*sK_layout.outer.stride[:-1], stage_stride),
            ),
        )
        sV_layout = cute.make_composed_layout(
            sV_layout.inner,
            0,
            cute.make_layout(
                (*sV_layout.outer.shape[:-1], self.kv_stage),
                stride=(*sV_layout.outer.stride[:-1], stage_stride),
            ),
        )
    if const_expr(self.pack_gqa):
        # PackGQA: fold the qhead_per_kvhead query heads into the seqlen mode of
        # Q/O/LSE so each KV head sees one "packed" query sequence.
        shape_Q_packed = (
            (self.qhead_per_kvhead, mQ.shape[0]),
            mQ.shape[1],
            mK.shape[2],
            *mQ.shape[3:],
        )
        stride_Q_packed = (
            (mQ.stride[2], mQ.stride[0]),
            mQ.stride[1],
            mQ.stride[2] * self.qhead_per_kvhead,
            *mQ.stride[3:],
        )
        mQ = cute.make_tensor(
            mQ.iterator, cute.make_layout(shape_Q_packed, stride=stride_Q_packed)
        )
        shape_O_packed = (
            (self.qhead_per_kvhead, mO.shape[0]),
            mO.shape[1],
            mK.shape[2],
            *mO.shape[3:],
        )
        stride_O_packed = (
            (mO.stride[2], mO.stride[0]),
            mO.stride[1],
            mO.stride[2] * self.qhead_per_kvhead,
            *mO.stride[3:],
        )
        mO = cute.make_tensor(
            mO.iterator, cute.make_layout(shape_O_packed, stride=stride_O_packed)
        )
        if const_expr(mLSE is not None):
            shape_LSE_packed = (
                (self.qhead_per_kvhead, mLSE.shape[0]),
                mK.shape[2],
                *mLSE.shape[2:],
            )
            stride_LSE_packed = (
                (mLSE.stride[1], mLSE.stride[0]),
                mLSE.stride[1] * self.qhead_per_kvhead,
                *mLSE.stride[2:],
            )
            mLSE = cute.make_tensor(
                mLSE.iterator, cute.make_layout(shape_LSE_packed, stride=stride_LSE_packed)
            )
    # Bytes moved per TMA transaction for each operand (one stage, modes 0-2).
    self.tma_copy_bytes = {
        name: cute.size_in_bytes(mX.element_type, cute.select(layout, mode=[0, 1, 2]))
        for name, mX, layout in [
            ("Q", mQ, sQ_layout),
            ("K", mK, sK_layout),
            ("V", mV, sV_layout),
        ]
    }
    for name in ("Q", "K", "V"):
        self.tma_copy_bytes[name] *= self.cta_group_size
    # TMA load for Q
    tma_load_op = cpasync.CopyBulkTensorTileG2SOp(cta_group)
    tma_store_op = cpasync.CopyBulkTensorTileS2GOp()
    tma_atom_Q, mQ = cute.nvgpu.make_tiled_tma_atom_A(
        tma_load_op,
        mQ,
        cute.select(sQ_layout, mode=[0, 1, 2]),
        self.mma_tiler_qk,
        tiled_mma_qk,
        cta_layout_vmnk.shape,
    )
    tma_atom_K = None
    tma_atom_V = None
    if const_expr(self.use_tma_KV):
        # TMA load for K
        tma_atom_K, mK = cute.nvgpu.make_tiled_tma_atom_B(
            tma_load_op,
            mK,
            cute.select(sK_layout, mode=[0, 1, 2]),
            self.mma_tiler_qk,
            tiled_mma_qk,
            cta_layout_vmnk.shape,
        )
        # TMA load for V
        tma_atom_V, mV = cute.nvgpu.make_tiled_tma_atom_B(
            tma_load_op,
            mV,
            cute.select(sV_layout, mode=[0, 1, 2]),
            self.mma_tiler_pv,
            tiled_mma_pv,
            cta_layout_vmnk.shape,
        )
    self.num_epilogue_threads = cute.arch.WARP_SIZE * len(self.epilogue_warp_ids)
    if const_expr(self.use_tma_O):
        tma_atom_O, mO = cpasync.make_tiled_tma_atom(
            tma_store_op, mO, cute.select(sO_layout, mode=[0, 1]), self.epi_tile
        )
        gmem_tiled_copy_O = None
    else:
        # Fallback epilogue: 128-bit universal copies from smem to gmem,
        # tiled across the epilogue threads.
        tma_atom_O = None
        universal_copy_bits = 128
        async_copy_elems = universal_copy_bits // self.o_dtype.width
        atom_universal_copy = cute.make_copy_atom(
            cute.nvgpu.CopyUniversalOp(),
            self.o_dtype,
            num_bits_per_copy=universal_copy_bits,
        )
        tO_shape_dim_1 = sO_layout.outer.shape[1][0] // async_copy_elems
        tO_layout = cute.make_ordered_layout(
            (self.num_epilogue_threads // tO_shape_dim_1, tO_shape_dim_1),
            order=(1, 0),
        )
        # So that we don't have to check if we overshoot kBlockM when we store O
        assert self.m_block_size % tO_layout.shape[0] == 0
        vO_layout = cute.make_layout((1, async_copy_elems))
        gmem_tiled_copy_O = cute.make_tiled_copy_tv(atom_universal_copy, tO_layout, vO_layout)
    # Pick a tile scheduler: varlen needs its own; causal/local benefits from
    # LPT (longest-processing-time) ordering; otherwise static/persistent.
    if const_expr(mCuSeqlensQ is not None or mSeqUsedQ is not None):
        TileScheduler = SingleTileVarlenScheduler
    else:
        if const_expr(self.is_causal or self.is_local):
            TileScheduler = SingleTileLPTScheduler
        else:
            TileScheduler = (
                SingleTileScheduler
                if const_expr(not self.is_persistent)
                else StaticPersistentTileScheduler
            )
    tile_sched_args = TileSchedulerArguments(
        cute.ceil_div(cute.size(mQ.shape[0]), self.cta_tiler[0]),
        cute.size(mQ.shape[2]),
        cute.size(mQ.shape[3])
        if const_expr(mCuSeqlensQ is None)
        else cute.size(mCuSeqlensQ.shape[0] - 1),
        num_splits,
        cute.size(mK.shape[0])
        if const_expr(mPageTable is None)
        else mK.shape[0] * mPageTable.shape[1],
        mQ.shape[1],
        mV.shape[0],  # Note that this is different from Sm90 since we transpose mV in Sm100
        total_q=cute.size(mQ.shape[0])
        if const_expr(mCuSeqlensQ is not None)
        else cute.size(mQ.shape[0]) * cute.size(mQ.shape[3]),
        tile_shape_mn=self.cta_tiler[:2],
        mCuSeqlensQ=mCuSeqlensQ,
        mSeqUsedQ=mSeqUsedQ,
        qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
        element_size=self.k_dtype.width // 8,
        is_persistent=self.is_persistent,
        lpt=self.is_causal or self.is_local,
        is_split_kv=self.is_split_kv,
        cluster_shape_mn=self.cluster_shape_mn,
    )
    tile_sched_params = TileScheduler.to_underlying_arguments(tile_sched_args)
    self.tile_scheduler_cls = TileScheduler
    grid_dim = TileScheduler.get_grid_shape(tile_sched_params)
    # When sO overlaps sQ, O is carved out of the Q buffer, so size the shared
    # region for the larger of the two (in Q-dtype elements).
    sO_size = cute.cosize(sO_layout) if const_expr(not self.overlap_sO_sQ) else 0
    sQ_size = (
        cute.cosize(sQ_layout) if const_expr(not self.overlap_sO_sQ) else
        cutlass.max(cute.cosize(sQ_layout), cute.cosize(sO_layout) * self.o_dtype.width // self.q_dtype.width)
    )

    # Static smem layout for the kernel: pipeline mbarriers, tmem bookkeeping,
    # softmax scale scratch, and the Q/K(/V)/O operand buffers.
    @cute.struct
    class SharedStorage:
        # m_barriers for pipelines
        mbar_load_Q: cute.struct.MemRange[Int64, self.q_stage * 2]
        mbar_load_KV: cute.struct.MemRange[Int64, self.kv_stage * 2]
        mbar_S_full_P_full_O_rescaled: cute.struct.MemRange[Int64, self.q_stage * 2]
        mbar_P_full_lastsplit: cute.struct.MemRange[Int64, self.q_stage * 2]
        mbar_O_full: cute.struct.MemRange[Int64, self.q_stage * 2]
        mbar_softmax_stats: cute.struct.MemRange[Int64, self.q_stage * 2]
        # mbar_softmax_stats: cute.struct.MemRange[Int64, self.q_stage * 4 * 2]
        mbar_O_epi: cute.struct.MemRange[Int64, self.q_stage * 2]
        mbar_s0_s1_sequence: cute.struct.MemRange[Int64, 2 * 2]
        # Tmem dealloc cluster barrier
        tmem_dealloc_mbar_ptr: Int64
        # Tmem holding buffer
        tmem_holding_buf: Int32
        # Smem tensors
        # store row max and row sum
        sScale: cute.struct.MemRange[Float32, self.q_stage * self.m_block_size * 2]
        sO: cute.struct.Align[
            cute.struct.MemRange[self.o_dtype, sO_size], self.buffer_align_bytes
        ]
        sQ: cute.struct.Align[
            cute.struct.MemRange[self.q_dtype, sQ_size], self.buffer_align_bytes
        ]
        sK: cute.struct.Align[
            # cute.cosize(sK_layout) is correct even in the case of self.uneven_kv_smem
            cute.struct.MemRange[self.k_dtype, cute.cosize(sK_layout)],
            self.buffer_align_bytes,
        ]

    self.shared_storage = SharedStorage
    # Fold the softmax scale into log2 space so the kernel can use exp2.
    LOG2_E = math.log2(math.e)
    if const_expr(self.score_mod is None):
        softmax_scale_log2 = softmax_scale * LOG2_E
        softmax_scale = None
    else:
        # NB: If a user passes in a score mod, we want to apply the score-mod in the sm_scaled qk
        # But in the original base 10. We hijack softmax_scale_log2 to just be the change of base
        # and correctly apply the softmax_scale prior to score_mod in the softmax step
        softmax_scale_log2 = LOG2_E
        softmax_scale = softmax_scale
    if const_expr(window_size_left is not None):
        window_size_left = Int32(window_size_left)
    if const_expr(window_size_right is not None):
        window_size_right = Int32(window_size_right)
    fastdiv_mods = None
    if cutlass.const_expr(aux_tensors is not None):
        # Precompute fast-division helpers for seqlen indexing used by aux tensors.
        seqlen_q = cute.size(mQ.shape[0]) // (
            self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1
        )
        seqlen_k = (
            cute.size(mK.shape[0])
            if const_expr(mPageTable is None)
            else mK.shape[0] * mPageTable.shape[1]
        )
        seqlen_q_divmod = FastDivmodDivisor(seqlen_q)
        seqlen_k_divmod = FastDivmodDivisor(seqlen_k)
        fastdiv_mods = (seqlen_q_divmod, seqlen_k_divmod)
    head_divmod = None
    if cutlass.const_expr(self.pack_gqa):
        head_divmod = FastDivmodDivisor(self.qhead_per_kvhead)
    self.use_block_sparsity = cutlass.const_expr(blocksparse_tensors is not None)
    if cutlass.const_expr(self.use_block_sparsity and mPageTable is not None):
        raise NotImplementedError("Block sparsity + paged KV not supported on SM100")
    # Launch the kernel synchronously
    self.kernel(
        mQ,
        mK,
        mV,
        mO,
        mLSE,
        mCuSeqlensQ,
        mCuSeqlensK,
        mSeqUsedQ,
        mSeqUsedK,
        mPageTable,
        tma_atom_Q,
        tma_atom_K,
        tma_atom_V,
        tma_atom_O,
        softmax_scale_log2,
        softmax_scale,
        window_size_left,
        window_size_right,
        learnable_sink,
        blocksparse_tensors,
        sQ_layout,
        sK_layout,
        tP_layout,
        sV_layout,
        sO_layout,
        gmem_tiled_copy_O,
        tiled_mma_qk,
        tiled_mma_pv,
        tile_sched_params,
        num_splits,
        aux_tensors,
        fastdiv_mods,
        head_divmod,
    ).launch(
        grid=grid_dim,
        block=[self.threads_per_cta, 1, 1],
        cluster=self.cluster_shape_mnk if cute.size(self.cluster_shape_mnk) > 1 else None,
        stream=stream,
        min_blocks_per_mp=1,
    )
# GPU device kernel
@cute.kernel
def kernel(
    self,
    mQ: cute.Tensor,  # (s_q, d, h, b) or (total_q, d, h) if there is cu_seqlens_q
    mK: cute.Tensor,  # (s_k, d, h_k, b_k) or (total_k, d, h_k) if there is cu_seqlens_k or (page_size, d, h_k, num_pages) if there is page_table
    mV: cute.Tensor,  # (d, s_k, h_k, b_k) or (d, total_k, h_k) if there is cu_seqlens_k or (d, page_size, h_k, num_pages) if there is page_table
    mO: cute.Tensor,
    mLSE: Optional[cute.Tensor],
    mCuSeqlensQ: Optional[cute.Tensor],
    mCuSeqlensK: Optional[cute.Tensor],
    mSeqUsedQ: Optional[cute.Tensor],
    mSeqUsedK: Optional[cute.Tensor],
    mPageTable: Optional[cute.Tensor],
    tma_atom_Q: cute.CopyAtom,
    tma_atom_K: Optional[cute.CopyAtom],
    tma_atom_V: Optional[cute.CopyAtom],
    tma_atom_O: Optional[cute.CopyAtom],
    softmax_scale_log2: Float32,
    softmax_scale: Float32 | None,
    window_size_left: Optional[Int32],
    window_size_right: Optional[Int32],
    learnable_sink: Optional[cute.Tensor],
    blocksparse_tensors: Optional[BlockSparseTensors],
    sQ_layout: cute.ComposedLayout,
    sK_layout: cute.ComposedLayout,
    tP_layout: cute.ComposedLayout,
    sV_layout: cute.ComposedLayout,
    sO_layout: cute.ComposedLayout,
    gmem_tiled_copy_O: Optional[cute.TiledCopy],
    tiled_mma_qk: cute.TiledMma,
    tiled_mma_pv: cute.TiledMma,
    tile_sched_params: ParamsBase,
    num_splits: Int32,
    aux_tensors: Optional[list] = None,
    fastdiv_mods=(None, None),
    head_divmod=None,
):
    """The device kernel implementation of the Fused Multi-Head Attention.

    This kernel coordinates multiple specialized warps to perform different phases of the FMHA computation:
    1. Load warp: Loads Q, K, V data from global memory to shared memory using TMA
    2. MMA warp: Performs matrix multiplications (Q*K^T and P*V)
    3. Softmax warps: Compute softmax normalization on attention scores
    4. Correction warps: Apply adjustments to intermediate results
    5. Epilogue warp: Handles final output transformation and storage

    The kernel implements a complex pipeline with overlapping computation and memory operations,
    using tensor memory access (TMA) for efficient data loading, warp specialization for different
    computation phases, and optional attention masking.
    """
    warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
    # Prefetch tma descriptor
    if warp_idx == 0:
        for tma_atom in (tma_atom_Q, tma_atom_K, tma_atom_V, tma_atom_O):
            if const_expr(tma_atom is not None):
                cpasync.prefetch_descriptor(tma_atom)
    cta_layout_vmnk = cute.tiled_divide(
        cute.make_layout(self.cluster_shape_mnk), (tiled_mma_qk.thr_id.shape,)
    )
    # Setup cta/thread coordinates
    bidx, _, _ = cute.arch.block_idx()
    # mma_tile_coord_v: position of this CTA within the (possibly 2-CTA) MMA group;
    # the leader CTA (coord 0) issues the UMMA instructions.
    if const_expr(cute.size(tiled_mma_qk.thr_id.shape) == 1):
        mma_tile_coord_v = 0
    else:
        mma_tile_coord_v = bidx % cute.size(tiled_mma_qk.thr_id.shape)
    is_leader_cta = mma_tile_coord_v == 0
    # Alloc
    smem = cutlass.utils.SmemAllocator()
    storage = smem.allocate(self.shared_storage)
    # Named barrier used to publish the tmem base pointer from the MMA (allocator)
    # warp to the softmax/correction consumers.
    tmem_alloc_barrier = pipeline.NamedBarrier(
        barrier_id=int(NamedBarrierFwd.TmemPtr),
        num_threads=cute.arch.WARP_SIZE * len(
            (self.mma_warp_id,
             *self.softmax0_warp_ids,
             *self.softmax1_warp_ids,
             *self.correction_warp_ids)
        ),
    )
    # Tensor memory dealloc barrier init
    tmem = cutlass.utils.TmemAllocator(
        storage.tmem_holding_buf,
        barrier_for_retrieve=tmem_alloc_barrier,
        allocator_warp_id=self.mma_warp_id,
        is_two_cta=self.use_2cta_instrs,
        two_cta_tmem_dealloc_mbar_ptr=storage.tmem_dealloc_mbar_ptr,
    )
    # Cooperative groups describing each pipeline endpoint's thread count.
    ThreadCooperativeGroup = partial(pipeline.CooperativeGroup, pipeline.Agent.Thread)
    mma_warp = ThreadCooperativeGroup(len([self.mma_warp_id]))
    load_warps = ThreadCooperativeGroup(len(self.load_warp_ids))
    tma_warp = ThreadCooperativeGroup(1)
    softmax_warps = ThreadCooperativeGroup(len(self.softmax0_warp_ids))
    softmax_threads = ThreadCooperativeGroup(cute.arch.WARP_SIZE * len(self.softmax0_warp_ids))
    # softmax_threads = ThreadCooperativeGroup(cute.arch.WARP_SIZE)
    correction_threads = ThreadCooperativeGroup(
        cute.arch.WARP_SIZE * len(self.correction_warp_ids)
    )
    # correction_threads = ThreadCooperativeGroup(cute.arch.WARP_SIZE)
    softmax_correction_threads = ThreadCooperativeGroup(
        cute.arch.WARP_SIZE * len(self.softmax0_warp_ids + self.correction_warp_ids)
    )
    epilogue_threads = ThreadCooperativeGroup(cute.arch.WARP_SIZE * len(self.epilogue_warp_ids))
    # For UMMA-bridging pipelines: the non-MMA side spans both CTAs in the cluster,
    # so the thread count must include warps from both CTAs.
    softmax_warps_cluster = ThreadCooperativeGroup(
        len(self.softmax0_warp_ids) * self.cta_group_size
    )
    correction_threads_cluster = ThreadCooperativeGroup(
        cute.arch.WARP_SIZE * len(self.correction_warp_ids) * self.cta_group_size
    )
    softmax_correction_threads_cluster = ThreadCooperativeGroup(
        cute.arch.WARP_SIZE * len(self.softmax0_warp_ids + self.correction_warp_ids) * self.cta_group_size
    )
    # TMA warp -> MMA warp: Q tiles arriving in smem.
    pipeline_q = pipeline_custom.PipelineTmaUmma.create(
        barrier_storage=storage.mbar_load_Q.data_ptr(),
        num_stages=self.q_stage,
        producer_group=tma_warp,
        consumer_group=mma_warp,
        tx_count=self.tma_copy_bytes["Q"],
        cta_layout_vmnk=cta_layout_vmnk,
        defer_sync=True,
    )
    # K/V loads: TMA-backed pipeline when possible, else cp.async from the load warps.
    if const_expr(self.use_tma_KV):
        pipeline_kv = pipeline_custom.PipelineTmaUmma.create(
            barrier_storage=storage.mbar_load_KV.data_ptr(),
            num_stages=self.kv_stage,
            producer_group=tma_warp,
            consumer_group=mma_warp,
            tx_count=self.tma_copy_bytes["K"],
            cta_layout_vmnk=cta_layout_vmnk,
            defer_sync=True,
        )
    else:
        cpasync_producer_group = pipeline.CooperativeGroup(
            pipeline.Agent.Thread, len(self.load_warp_ids) * cute.arch.WARP_SIZE
        )
        pipeline_kv = pipeline.PipelineAsyncUmma.create(
            barrier_storage=storage.mbar_load_KV.data_ptr(),
            num_stages=self.kv_stage,
            producer_group=cpasync_producer_group,
            consumer_group=mma_warp,
            cta_layout_vmnk=cta_layout_vmnk,
            defer_sync=True,
        )
    # This pipeline is not the typical producer-consumer pipeline. The "producer" mma warp
    # uses it to signal that S is ready, and the softmax threads wait for S to be ready.
    # When softmax threads write P to tmem and the correction threads have rescaled O, they
    # signal as "consumer". The mma warp then waits for that signal to do the P @ V gemm.
    pipeline_s_p_o = pipeline_custom.PipelineUmmaAsync.create(
        barrier_storage=storage.mbar_S_full_P_full_O_rescaled.data_ptr(),
        num_stages=self.q_stage,
        producer_group=mma_warp,
        consumer_group=softmax_correction_threads_cluster,
        cta_layout_vmnk=cta_layout_vmnk,
        defer_sync=True,
    )
    # Softmax warps -> MMA warp: P written for the last split of a tile.
    pipeline_p_lastsplit = pipeline_custom.PipelineAsyncUmma.create(
        barrier_storage=storage.mbar_P_full_lastsplit.data_ptr(),
        num_stages=self.q_stage,
        producer_group=softmax_warps_cluster,
        consumer_group=mma_warp,
        cta_layout_vmnk=cta_layout_vmnk,
        defer_sync=True,
    )
    # MMA warp uses this to signal to the correction warps that O is ready.
    pipeline_o_acc = pipeline_custom.PipelineUmmaAsync.create(
        barrier_storage=storage.mbar_O_full.data_ptr(),
        num_stages=self.q_stage,
        producer_group=mma_warp,
        consumer_group=correction_threads_cluster,
        cta_layout_vmnk=cta_layout_vmnk,
        defer_sync=True,
    )
    pipeline_s0_s1_sequence = None
    if const_expr(self.s0_s1_barrier and self.q_stage > 1):
        # This is not a typical producer-consumer pipeline. We will directly use
        # pipeline_s0_s1_sequence.sync_object_full and will not use
        # pipeline_s0_s1_sequence.sync_object_empty.
        pipeline_s0_s1_sequence = pipeline_custom.PipelineAsync.create(
            barrier_storage=storage.mbar_s0_s1_sequence.data_ptr(),
            num_stages=2,
            producer_group=softmax_threads,
            consumer_group=softmax_threads,
            defer_sync=True,
        )
    # Softmax -> correction: per-row stats (max / sum) handoff.
    pipeline_sm_stats = pipeline_custom.PipelineAsync.create(
        barrier_storage=storage.mbar_softmax_stats.data_ptr(),
        num_stages=self.q_stage,
        producer_group=softmax_threads,
        consumer_group=correction_threads,
        defer_sync=True,
    )
    # Should put the NamedBarrier inside the pipeline class so we'll just have pipeline_sm_stats
    sm_stats_barrier = pipeline_custom.NamedBarrier(
        barrier_id=int(NamedBarrierFwd.SoftmaxStatsW0), num_threads=cute.arch.WARP_SIZE * 2
    )
    pipeline_o_epi = None
    if const_expr(not self.use_correction_warps_for_epi):
        # Correction -> epilogue: O tile staged in smem, ready for store.
        pipeline_o_epi = pipeline_custom.PipelineAsync.create(
            barrier_storage=storage.mbar_O_epi.data_ptr(),
            num_stages=self.q_stage,
            producer_group=correction_threads,
            consumer_group=epilogue_threads,
            defer_sync=True,
        )
    # Cluster arrive after barrier init
    pipeline_init_arrive(cluster_shape_mn=cta_layout_vmnk, is_relaxed=True)
    # Generate smem tensor Q/K/V/O
    # (MMA, MMA_Q, MMA_D, PIPE)
    sQ = storage.sQ.get_tensor(sQ_layout.outer, swizzle=sQ_layout.inner)
    # (MMA, MMA_K, MMA_D, PIPE)
    sK = storage.sK.get_tensor(sK_layout.outer, swizzle=sK_layout.inner)
    # (MMA, MMA_K, MMA_D, PIPE)
    # Strip swizzle info to reuse smem
    sV = cute.make_tensor(cute.recast_ptr(sK.iterator, sV_layout.inner), sV_layout.outer)
    if const_expr(not self.overlap_sO_sQ):
        sO = storage.sO.get_tensor(sO_layout.outer, swizzle=sO_layout.inner)
    else:
        # O reuses the Q smem buffer (reinterpreted to the O dtype/layout).
        sO = cute.make_tensor(cute.recast_ptr(sQ.iterator, sO_layout.inner, self.o_dtype), sO_layout.outer)
    sScale = storage.sScale.get_tensor(cute.make_layout(self.q_stage * self.m_block_size * 2))
    thr_mma_qk = tiled_mma_qk.get_slice(mma_tile_coord_v)
    thr_mma_pv = tiled_mma_pv.get_slice(mma_tile_coord_v)
    qk_acc_shape = thr_mma_qk.partition_shape_C(self.mma_tiler_qk[:2])
    # This is a fake tensor, by right we need to retrieve tmem_ptr. But we know that we always
    # request 512 columns of tmem, so we know that it starts at 0.
    tStS = thr_mma_qk.make_fragment_C(cute.append(qk_acc_shape, self.s_stage))
    pv_acc_shape = thr_mma_pv.partition_shape_C(self.mma_tiler_pv[:2])
    tOtO = thr_mma_pv.make_fragment_C(cute.append(pv_acc_shape, self.q_stage))
    tOtO = cute.make_tensor(tOtO.iterator + self.tmem_o_offset[0], tOtO.layout)
    tP = cute.make_tensor(tStS.iterator, tP_layout.outer)
    tOrP = thr_mma_pv.make_fragment_A(tP)[None, None, None, 0]
    # Need to multiply by width ratio bc tP is in v_dtype but tmem offsets are in FP32
    tP_width_ratio = Float32.width // self.v_dtype.width
    # Need to adjust the stage stride manually since the two stages aren't contiguous in tmem
    tP_stage_stride = (self.tmem_p_offset[1] - self.tmem_p_offset[0]) * tP_width_ratio
    tOrP = cute.make_tensor(
        tOrP.iterator + self.tmem_p_offset[0] * tP_width_ratio,
        cute.append(tOrP.layout, cute.make_layout((self.s_stage,), stride=(tP_stage_stride,)))
    )
    block_info = BlockInfo(
        # This is cta_tiler, not mma_tiler_qk, since we move by block by (2 * mma_tiler[0], mma_tiler[1])
        self.cta_tiler[0],
        self.cta_tiler[1],
        self.is_causal,
        self.is_local,
        self.is_split_kv,
        window_size_left,
        window_size_right,
        qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
    )
    # Factory for per-batch sequence-length info (static, varlen, or paged).
    SeqlenInfoCls = partial(
        SeqlenInfoQK.create,
        seqlen_q_static=mQ.shape[0] if const_expr(not self.pack_gqa) else mQ.shape[0][1],
        seqlen_k_static=mK.shape[0]
        if const_expr(mPageTable is None)
        else mK.shape[0] * mPageTable.shape[1],
        mCuSeqlensQ=mCuSeqlensQ,
        mCuSeqlensK=mCuSeqlensK,
        mSeqUsedQ=mSeqUsedQ,
        mSeqUsedK=mSeqUsedK,
    )
    AttentionMaskCls = partial(
        AttentionMask,
        self.m_block_size,
        self.n_block_size,
        window_size_left=window_size_left,
        window_size_right=window_size_right,
        qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
    )
    TileSchedulerCls = partial(self.tile_scheduler_cls.create, tile_sched_params)
    # Cluster wait before tensor memory alloc
    pipeline_init_wait(cluster_shape_mn=cta_layout_vmnk)
    # ///////////////////////////////////////////////////////////////////////////////
    #  EMPTY
    # ///////////////////////////////////////////////////////////////////////////////
    for i in cutlass.range_constexpr(len(self.empty_warp_ids)):
        if warp_idx == self.empty_warp_ids[i]:
            cute.arch.setmaxregister_decrease(self.num_regs_other)
    # ///////////////////////////////////////////////////////////////////////////////
    #  LOAD
    # ///////////////////////////////////////////////////////////////////////////////
    if warp_idx >= self.load_warp_ids[0] and warp_idx <= self.load_warp_ids[-1]:
        cute.arch.setmaxregister_decrease(self.num_regs_other)
        self.load(
            thr_mma_qk,
            thr_mma_pv,
            mQ,
            mK,
            mV,
            sQ,
            sK,
            sV,
            mPageTable,
            tma_atom_Q,
            tma_atom_K,
            tma_atom_V,
            pipeline_q,
            pipeline_kv,
            block_info,
            num_splits,
            SeqlenInfoCls,
            TileSchedulerCls,
            blocksparse_tensors,
        )
    # ///////////////////////////////////////////////////////////////////////////////
    #  MMA
    # ///////////////////////////////////////////////////////////////////////////////
    if warp_idx == self.mma_warp_id:
        cute.arch.setmaxregister_decrease(self.num_regs_other)
        # Alloc tensor memory buffer
        tmem.allocate(cute.arch.get_max_tmem_alloc_cols("sm_100"))
        tmem.wait_for_alloc()
        tmem_ptr = tmem.retrieve_ptr(self.qk_acc_dtype)
        self.mma(
            tiled_mma_qk,
            tiled_mma_pv,
            sQ,
            sK,
            sV,
            tStS,
            tOtO,
            tOrP,
            pipeline_q,
            pipeline_kv,
            pipeline_s_p_o,
            pipeline_p_lastsplit,
            pipeline_o_acc,
            is_leader_cta,
            block_info,
            num_splits,
            SeqlenInfoCls,
            TileSchedulerCls,
            blocksparse_tensors,
        )
        # Dealloc the tensor memory buffer
        tmem.relinquish_alloc_permit()
        tmem.free(tmem_ptr)
    # ///////////////////////////////////////////////////////////////////////////////
    #  Epilogue
    # ///////////////////////////////////////////////////////////////////////////////
    if const_expr(not self.use_correction_warps_for_epi):
        if warp_idx >= self.epilogue_warp_ids[0] and warp_idx <= self.epilogue_warp_ids[-1]:
            cute.arch.setmaxregister_decrease(self.num_regs_other)
            self.epilogue_s2g(
                mO,
                sO,
                gmem_tiled_copy_O,
                tma_atom_O,
                pipeline_o_epi,
                block_info,
                num_splits,
                SeqlenInfoCls,
                TileSchedulerCls,
                mma_tile_coord_v,
            )
    # ///////////////////////////////////////////////////////////////////////////////
    #  Softmax
    # ///////////////////////////////////////////////////////////////////////////////
    if (
        (const_expr(self.q_stage == 2) and warp_idx <= self.softmax1_warp_ids[-1]) or
        (const_expr(self.q_stage == 1) and warp_idx <= self.softmax0_warp_ids[-1])
    ):
        # increase register after decreasing
        cute.arch.setmaxregister_increase(self.num_regs_softmax)
        # sync with mma warp before retrieving tmem ptr
        tmem.wait_for_alloc()
        tmem_ptr = tmem.retrieve_ptr(self.qk_acc_dtype)
        softmax_loop = partial(
            self.softmax_loop,
            softmax_scale_log2=softmax_scale_log2,
            softmax_scale=softmax_scale,
            thr_mma_qk=thr_mma_qk,
            sScale=sScale,
            mLSE=mLSE,
            pipeline_s_p_o=pipeline_s_p_o,
            pipeline_p_lastsplit=pipeline_p_lastsplit,
            pipeline_sm_stats=pipeline_sm_stats,
            sm_stats_barrier=sm_stats_barrier,
            pipeline_s0_s1_sequence=pipeline_s0_s1_sequence,
            learnable_sink=learnable_sink,
            block_info=block_info,
            num_splits=num_splits,
            SeqlenInfoCls=SeqlenInfoCls,
            AttentionMaskCls=AttentionMaskCls,
            TileSchedulerCls=TileSchedulerCls,
            aux_tensors=aux_tensors,
            fastdiv_mods=fastdiv_mods,
            head_divmod=head_divmod,
            blocksparse_tensors=blocksparse_tensors,
        )
        if const_expr(not self.s0_s1_barrier):
            # One code path for both softmax warp groups; pick the stage at runtime.
            stage = Int32(0 if const_expr(self.q_stage == 1) or warp_idx < self.softmax1_warp_ids[0] else 1)
            softmax_loop(stage=stage, tStS=tStS)
        else:
            # If there's s0_s1_barrier, it's faster to have 2 WGs having different code
            if warp_idx < self.softmax1_warp_ids[0]:
                softmax_loop(stage=0, tStS=tStS)
            if warp_idx < self.correction_warp_ids[0] and warp_idx >= self.softmax1_warp_ids[0]:
                softmax_loop(stage=1, tStS=tStS)
    # ///////////////////////////////////////////////////////////////////////////////
    #  Correction
    # ///////////////////////////////////////////////////////////////////////////////
    if warp_idx >= self.correction_warp_ids[0] and warp_idx < self.mma_warp_id:
        cute.arch.setmaxregister_decrease(self.num_regs_correction)
        # sync with mma warp before retrieving tmem ptr
        tmem.wait_for_alloc()
        tmem_ptr = tmem.retrieve_ptr(self.qk_acc_dtype)
        self.correction_loop(
            thr_mma_qk,
            thr_mma_pv,
            tStS,
            tOtO,
            sScale,
            mO,
            mLSE,
            sO,
            pipeline_s_p_o,
            pipeline_o_acc,
            pipeline_sm_stats,
            sm_stats_barrier,
            pipeline_o_epi,
            learnable_sink,
            gmem_tiled_copy_O,
            tma_atom_O,
            softmax_scale_log2,
            block_info,
            num_splits,
            SeqlenInfoCls,
            TileSchedulerCls,
            blocksparse_tensors,
        )
    return
    @cute.jit
    def load(
        self,
        thr_mma_qk: cute.core.ThrMma,
        thr_mma_pv: cute.core.ThrMma,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sV: cute.Tensor,
        mPageTable: Optional[cute.Tensor],
        tma_atom_Q: cute.CopyAtom,
        tma_atom_K: Optional[cute.CopyAtom],
        tma_atom_V: Optional[cute.CopyAtom],
        pipeline_q: pipeline.PipelineAsync,
        pipeline_kv: pipeline.PipelineAsync,
        block_info: BlockInfo,
        num_splits: Int32,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
        blocksparse_tensors: Optional[BlockSparseTensors],
    ):
        """Producer ("load") warp loop: stage Q, K and V tiles into shared memory.

        Runs the persistent tile-scheduler loop. For each work tile it builds gmem
        views of Q/K/V for the current (m_block, head, batch[, split]) and issues
        the copies in the order K0, Q0[, Q1], V0, K1, V1, ... (KV blocks are walked
        back-to-front) so the MMA warp can start the first QK GEMM as early as
        possible. Q is always copied via TMA (``tma_atom_Q``); K/V go through TMA
        when ``self.use_tma_KV``, otherwise through a ``PagedKVManager`` that
        resolves page indices from ``mPageTable``. With block sparsity enabled, the
        per-tile load sequence is delegated to ``produce_block_sparse_loads_sm100``.

        ``pipeline_q`` / ``pipeline_kv`` provide the producer-side mbarrier
        handshake with the MMA warp; both are drained before returning so no
        barrier is left in-flight.
        """
        num_load_threads = len(self.load_warp_ids) * cute.arch.WARP_SIZE
        tidx = cute.arch.thread_idx()[0] % num_load_threads
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        # Q uses per-stage indexed acquire/commit, so only a phase bit is tracked;
        # KV uses a full producer pipeline state.
        q_producer_phase = Int32(1)
        kv_producer_state = pipeline.make_pipeline_state(
            pipeline.PipelineUserType.Producer, self.kv_stage
        )
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            m_block, head_idx, batch_idx, split_idx = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            mQ_cur = seqlen.offset_batch_Q(mQ, batch_idx, dim=3)[None, None, head_idx]
            tiler_gQ = ((self.mma_tiler_qk[0] * self.q_stage), self.head_dim_padded)
            gQ = cute.local_tile(mQ_cur, tiler_gQ, (m_block, 0))  # (128 * 2, 128)
            gQ = layout_utils.select(
                cute.flat_divide(gQ, (self.mma_tiler_qk[0],)), mode=[0, 2, 1]
            )  # (128, 128, 2)
            head_idx_kv = (
                head_idx // self.qhead_per_kvhead if const_expr(not self.pack_gqa) else head_idx
            )
            if const_expr(mPageTable is None):
                if const_expr(not seqlen.has_cu_seqlens_k):
                    mK_cur, mV_cur = [t[None, None, head_idx_kv, batch_idx] for t in (mK, mV)]
                else:
                    # Varlen: shift the view to this batch's K/V segment.
                    mK_cur = cute.domain_offset((seqlen.offset_k, 0), mK[None, None, head_idx_kv])
                    mV_cur = cute.domain_offset((0, seqlen.offset_k), mV[None, None, head_idx_kv])
                gK = cute.local_tile(mK_cur, cute.select(self.mma_tiler_qk, mode=[1, 2]), (None, 0))
                gV = cute.local_tile(mV_cur, cute.select(self.mma_tiler_pv, mode=[1, 2]), (0, None))
            else:
                # Need to keep batch coord None since we'll index into it with page idx
                mK_cur, mV_cur = [t[None, None, head_idx_kv, None] for t in (mK, mV)]
                gK = cute.local_tile(
                    mK_cur, cute.select(self.mma_tiler_qk, mode=[1, 2]), (None, 0, None)
                )
                gV = cute.local_tile(
                    mV_cur, cute.select(self.mma_tiler_pv, mode=[1, 2]), (0, None, None)
                )
            tSgQ = thr_mma_qk.partition_A(gQ)
            tSgK = thr_mma_qk.partition_B(gK)
            tOgV = thr_mma_pv.partition_B(gV)
            load_Q_fn, _, _ = copy_utils.tma_get_copy_fn(
                tma_atom_Q, 0, cute.make_layout(1), tSgQ, sQ
            )
            if const_expr(self.use_tma_KV):
                tKsK, tKgK = cpasync.tma_partition(
                    tma_atom_K,
                    0,  # no multicast
                    cute.make_layout(1),
                    cute.group_modes(sK, 0, 3),
                    cute.group_modes(tSgK, 0, 3),
                )
                tVsV, tVgV = cpasync.tma_partition(
                    tma_atom_V,
                    0,  # no multicast
                    cute.make_layout(1),
                    cute.group_modes(sV, 0, 3),
                    cute.group_modes(tOgV, 0, 3),
                )
                paged_kv_manager = None
            else:
                # Non-TMA path: gather K/V pages from gmem by hand.
                page_size = mK.shape[0]
                paged_kv_manager = PagedKVManager.create(
                    mPageTable,
                    mK,
                    mV,
                    FastDivmodDivisor(page_size),
                    batch_idx,
                    head_idx_kv,
                    tidx,
                    seqlen.seqlen_k,
                    0,  # leftpad_k
                    self.n_block_size,
                    self.head_dim_padded,
                    self.head_dim_v_padded,
                    num_load_threads,
                    mK.element_type,
                )
                tKsK, tKgK = None, None
                tVsV, tVgV = None, None
            load_Q = partial(self.load_Q, load_Q_fn, pipeline_q=pipeline_q, phase=q_producer_phase)
            load_K = partial(
                self.load_KV,
                tma_atom_K,
                tKgK,
                tKsK,
                paged_kv_manager,
                sK,
                pipeline_kv=pipeline_kv,
                K_or_V="K",
            )
            load_V = partial(
                self.load_KV,
                tma_atom_V,
                tVgV,
                tVsV,
                paged_kv_manager,
                sV,
                pipeline_kv=pipeline_kv,
                K_or_V="V",
            )
            if const_expr(not self.use_block_sparsity):
                n_block_min, n_block_max = block_info.get_n_block_min_max(
                    seqlen, m_block, split_idx, num_splits
                )
                if const_expr(not self.is_split_kv) or n_block_min < n_block_max:
                    n_block_first = n_block_max - 1 if n_block_max > 0 else 0
                    page_idx = (
                        mPageTable[batch_idx, n_block_first]
                        if const_expr(mPageTable is not None and self.use_tma_KV)
                        else None
                    )
                    if const_expr(not self.use_tma_KV):
                        paged_kv_manager.load_page_table(n_block_first)
                    load_K(block=n_block_max - 1, producer_state=kv_producer_state, page_idx=page_idx)  # K0
                    # load_K(block=n_block_max - 1, producer_state=kv_producer_state, page_idx=page_idx, extra_tx_count=self.tma_copy_bytes["Q"]) # K0
                    if const_expr(len(self.load_warp_ids) == 1) or warp_idx == self.load_warp_ids[0]:
                        # load_Q(block=0, stage=0) # Q0
                        pipeline_q.producer_acquire_w_index_phase(0, q_producer_phase)
                        # pipeline_q.sync_object_empty.wait(0, q_producer_phase)
                        tma_bar_ptr = pipeline_q.sync_object_full.get_barrier(0)
                        # tma_bar_ptr = pipeline_kv.producer_get_barrier(kv_producer_state)
                        load_Q_fn(src_idx=0, dst_idx=0, tma_bar_ptr=tma_bar_ptr)
                    kv_producer_state.advance()
                    if const_expr(self.q_stage == 2) and (const_expr(len(self.load_warp_ids) == 1) or warp_idx == self.load_warp_ids[0]):
                        # load_Q(block=1, stage=1) # Q1
                        pipeline_q.producer_acquire_w_index_phase(1, q_producer_phase)
                        tma_bar_ptr = pipeline_q.sync_object_full.get_barrier(1)
                        load_Q_fn(src_idx=1, dst_idx=1, tma_bar_ptr=tma_bar_ptr)
                    q_producer_phase ^= 1
                    load_V(block=n_block_max - 1, producer_state=kv_producer_state, page_idx=page_idx)  # V0
                    kv_producer_state.advance()
                    # Remaining KV blocks, highest index first (K then V per block).
                    for i in cutlass.range(n_block_max - 1 - n_block_min, unroll=1):
                        n_block = n_block_max - 2 - i
                        page_idx = (
                            mPageTable[batch_idx, n_block]
                            if const_expr(mPageTable is not None and self.use_tma_KV)
                            else None
                        )
                        if const_expr(not self.use_tma_KV):
                            paged_kv_manager.load_page_table(n_block)
                        # if cute.arch.thread_idx()[0] % 32 == 0: cute.printf("n_block = {}, page_idx = {}", n_block, page_idx)
                        load_K(block=n_block, producer_state=kv_producer_state, page_idx=page_idx)  # Ki
                        kv_producer_state.advance()
                        load_V(block=n_block, producer_state=kv_producer_state, page_idx=page_idx)  # Vi
                        kv_producer_state.advance()
            else:
                kv_producer_state, q_producer_phase = produce_block_sparse_loads_sm100(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    m_block,
                    kv_producer_state,
                    load_Q,
                    load_K,
                    load_V,
                    pipeline_kv,
                    self.q_stage,
                    q_producer_phase,
                    self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                    self.q_subtile_factor if self.q_subtile_factor is not None else 1,
                )
            tile_scheduler.prefetch_next_work()
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
        # End of persistent scheduler loop
        pipeline_kv.producer_tail(kv_producer_state)
        # This is equivalent to pipeline_q.producer_tail
        if const_expr(len(self.load_warp_ids) == 1) or warp_idx == self.load_warp_ids[0]:
            pipeline_q.producer_acquire_w_index_phase(self.q_stage - 1, q_producer_phase)
    @cute.jit
    def mma(
        self,
        tiled_mma_qk: cute.core.ThrMma,
        tiled_mma_pv: cute.core.ThrMma,
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sV: cute.Tensor,
        tStS: cute.Tensor,
        tOtO: cute.Tensor,
        tOrP: cute.Tensor,
        pipeline_q: pipeline.PipelineAsync,
        pipeline_kv: pipeline.PipelineAsync,
        pipeline_s_p_o: pipeline.PipelineAsync,
        pipeline_p_lastsplit: pipeline.PipelineAsync,
        pipeline_o_acc: pipeline.PipelineAsync,
        is_leader_cta: Boolean,
        block_info: BlockInfo,
        num_splits: Int32,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
        blocksparse_tensors: Optional[BlockSparseTensors],
    ):
        """MMA warp loop: issue the QK and PV GEMMs on the tensor cores.

        Only the leader CTA (``is_leader_cta``) issues MMAs. Per work tile the
        schedule is software-pipelined over KV blocks:

        * prologue: S_stage = Q_stage @ K0 for each Q stage,
        * steady state: O_stage += P_stage @ V_i interleaved with
          S_stage = Q_stage @ K_(i+1),
        * epilogue: the last P @ V accumulation, after which O is signaled to
          the correction/epilogue warps via ``pipeline_o_acc``.

        GEMMs are issued through precomputed PTX smem/instruction descriptors
        (``gemm_ptx_precomputed_varname`` / ``gemm_ptx_partial``); the generic
        ``sm100_utils.gemm`` equivalents are kept in comments for reference.
        ``pipeline_q`` / ``pipeline_kv`` are consumed here, while
        ``pipeline_s_p_o`` publishes S tiles to the softmax warps and gates on
        P / rescaled-O availability.
        """
        tSrQ = tiled_mma_qk.make_fragment_A(sQ)
        tSrK = tiled_mma_qk.make_fragment_B(sK)
        tOrV = tiled_mma_pv.make_fragment_B(sV)
        if const_expr(self.q_stage == 2):
            tSrQs = (tSrQ[None, None, None, 0], tSrQ[None, None, None, 1])
        else:
            tSrQs = (tSrQ[None, None, None, 0],)
        qk_mma_op, pv_mma_op = tiled_mma_qk.op, tiled_mma_pv.op
        qk_mma_idesc, pv_mma_idesc = sm100_desc.mma_op_to_idesc(qk_mma_op), sm100_desc.mma_op_to_idesc(pv_mma_op)
        q_smem_base = sm100_desc.smem_desc_base_from_tensor(sQ, sm100_desc.Major.K)
        k_smem_base = sm100_desc.smem_desc_base_from_tensor(sK, sm100_desc.Major.K)
        v_smem_base = sm100_desc.smem_desc_base_from_tensor(sV, sm100_desc.Major.MN)
        q_smem_start = [sm100_desc.make_smem_desc_start_addr(sQ[None, None, None, stage].iterator) for stage in range(self.q_stage)]
        sm100_utils.declare_ptx_smem_desc(q_smem_start[self.q_stage - 1], q_smem_base, tSrQ[None, None, None, 0].layout, var_name_prefix="fa_fwd_q_smem_desc")
        sm100_utils.declare_ptx_idesc(qk_mma_op, var_name="fa_fwd_qk_mma_idesc")
        sm100_utils.declare_ptx_idesc(pv_mma_op, var_name="fa_fwd_pv_mma_idesc")
        # Distance between consecutive Q smem stages (bits -> bytes -> 16B units).
        # The PTX descriptor was declared for the last stage, so stage 0 is
        # reached with a negative offset below.
        sQ_stage_stride = (sQ.layout.stride[-1] * sQ.element_type.width // 8) >> 4
        if const_expr(self.q_stage == 1):
            sQ_stage_stride = 0
        gemm_Si = [
            partial(
                # sm100_utils.gemm_ptx_precomputed,
                # self.tmem_s_offset[stage],
                # smem_desc_start_a=q_smem_start[stage],
                # idesc=qk_mma_idesc,
                # smem_desc_base_a=q_smem_base,
                # smem_desc_base_b=k_smem_base,
                # tCrA_layout=tSrQ[None, None, None, 0].layout,
                sm100_utils.gemm_ptx_precomputed_varname,
                self.tmem_s_offset[stage],
                # idesc=qk_mma_idesc,
                smem_desc_base_b=k_smem_base,
                tCrB_layout=tSrK[None, None, None, 0].layout,
                smem_var_name_prefix=f"fa_fwd_q_smem_desc",
                idesc_var_name=f"fa_fwd_qk_mma_idesc",
                smem_offset=-sQ_stage_stride if stage == 0 else sQ_stage_stride,
                zero_init=True,
                cta_group=self.cta_group_size,
            )
            for stage in range(self.q_stage)
        ]
        # gemm_Si = [
        #     partial(
        #         sm100_utils.gemm,
        #         tiled_mma_qk,
        #         tStS[None, None, None, stage],
        #         tCrA=tSrQ[None, None, None, stage],
        #         zero_init=True,
        #     )
        #     for stage in range(self.q_stage)
        # ]
        gemm_Pi = [
            partial(
                # sm100_utils.gemm_ptx_precomputed,
                sm100_utils.gemm_ptx_partial,
                pv_mma_op,
                self.tmem_o_offset[stage],
                tOrP[None, None, None, stage],
                sA=None,
                split_arrive=self.split_P_arrive if self.split_P_arrive > 0 else None,
                # smem_desc_start_a=tOrP[None, None, None, stage].iterator.toint(),
                # smem_desc_start_a=self.tmem_p_offset[stage],
                # idesc=pv_mma_idesc,
                # smem_desc_base_a=None,
                # smem_desc_base_b=v_smem_base,
                # tCrA_layout=tOrP[None, None, None, 0].layout,
                # tCrB_layout=tOrV[None, None, None, 0].layout
                cta_group=self.cta_group_size,
            )
            for stage in range(self.q_stage)
        ]
        # gemm_Pi = [
        #     partial(
        #         sm100_utils.gemm, tOtO[None, None, None, stage], tCrA=tOrP[None, None, None, stage]
        #     )
        #     for stage in range(self.q_stage)
        # ]
        mma_q_consumer_phase = Int32(0)
        mma_kv_consumer_state = pipeline.make_pipeline_state(
            pipeline.PipelineUserType.Consumer, self.kv_stage
        )
        P_full_O_rescaled_phase = Int32(0)
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            m_block, head_idx, batch_idx, split_idx = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            block_iter_count = Int32(0)
            process_tile = False
            if const_expr(self.use_block_sparsity):
                block_iter_count = get_total_block_count(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    m_block,
                    self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                    self.q_subtile_factor if self.q_subtile_factor is not None else 1,
                )
                process_tile = block_iter_count > Int32(0)
            else:
                n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block, split_idx, num_splits)
                block_iter_count = n_block_max - n_block_min
                if const_expr(not self.is_split_kv):
                    process_tile = True
                else:
                    process_tile = n_block_min < n_block_max
            if process_tile and is_leader_cta:
                for stage in cutlass.range_constexpr(self.q_stage):
                    # GEMM_QK00 (Q0 * K0 -> S0) or GEMM_QK01 (Q1 * K0 -> S1)
                    # 1. wait for Q0 / Q1
                    pipeline_q.consumer_wait_w_index_phase(stage, mma_q_consumer_phase)
                    # 2. wait for K0
                    if const_expr(stage == 0):
                        pipeline_kv.consumer_wait(mma_kv_consumer_state)
                        Ki_index, Ki_phase = mma_kv_consumer_state.index, mma_kv_consumer_state.phase
                        tSrKi = tSrK[None, None, None, Ki_index]
                    # We don't need to acquire empty S0 / S1.
                    # For the first iteration, we don't need to wait as we're guaranteed S0 / S1
                    # are empty. For subsequent iterations, the wait happened at the end
                    # of the while loop.
                    # 3. gemm
                    # sm100_utils.gemm(tiled_mma_qk, tStS[None, None, None, stage], tSrQ[None, None, None, stage], tSrKi, zero_init=True)
                    sK_cur = sK[None, None, None, Ki_index]
                    if const_expr(self.uneven_kv_smem):
                        sK_cur = self.offset_kv_smem(sK_cur, Ki_index, Ki_phase)
                    # gemm_Si[stage](tCrB=tSrKi, sB=sK_cur)
                    gemm_Si[stage](
                        smem_desc_start_b=sm100_desc.make_smem_desc_start_addr(sK_cur.iterator)
                    )
                    # gemm_Si[stage](tCrB=tSrKi)
                    # 4. release S0 / S1
                    pipeline_s_p_o.producer_commit_w_index(stage)
                mma_q_consumer_phase ^= 1
                # 5. release K0
                pipeline_kv.consumer_release(mma_kv_consumer_state)
                mma_kv_consumer_state.advance()
                # End of GEMM (Q1 * K0 -> S1)
                # Note: Q0 & Q1 are still needed in the seqlen_kv loop
                # so we need to release them after the seqlen_kv loop
                # O hasn't been accumulated yet, its first MMA calculation doesn't need to accumulate
                block_loop_count = block_iter_count - 1
                O_should_accumulate = False
                for i in cutlass.range(block_loop_count, unroll=1):
                    # GEMM_PV00 (P0 * V0 -> O0_partial), O0 needs to be accumulated in the seqlen_kv loop
                    # 1. wait for V0
                    pipeline_kv.consumer_wait(mma_kv_consumer_state)
                    mma_kv_release_state = mma_kv_consumer_state.clone()
                    Vi_index, Vi_phase = mma_kv_consumer_state.index, mma_kv_consumer_state.phase
                    tOrVi = tOrV[None, None, None, Vi_index]
                    for stage in cutlass.range_constexpr(self.q_stage):
                        # 2. acquire corrected O0/O1_partial and P0 / P1
                        # For the first iteration in this work tile, waiting for O0/O1_partial
                        # means that the correction warps has finished reading tO during
                        # the last iteration of the previous work tile.
                        pipeline_s_p_o.producer_acquire_w_index_phase(stage, P_full_O_rescaled_phase)
                        # 3. gemm
                        # sm100_utils.gemm(tiled_mma_pv, tOtO0, tOrP0, tOrVi, zero_init=True)
                        # gemm_Pi[stage](tCrB=tOrVi, sB=sV[None, None, None, Vi_index], zero_init=not O_should_accumulate)
                        sV_cur = sV[None, None, None, Vi_index]
                        if const_expr(self.uneven_kv_smem):
                            sV_cur = self.offset_kv_smem(sV_cur, Vi_index, Vi_phase)
                        gemm_Pi[stage](
                            tCrB=tOrVi,
                            sB=sV_cur,
                            # smem_desc_start_b=sm100_desc.make_smem_desc_start_addr(sV_cur.iterator),
                            zero_init=not O_should_accumulate,
                            mbar_ptr=pipeline_p_lastsplit.sync_object_full.get_barrier(stage) if self.split_P_arrive > 0 else None,
                            mbar_phase=P_full_O_rescaled_phase,
                        )
                        # Don't need to signal O_full to the correction warps since the
                        # correction warps wait for the softmax warps anyway. By the time the softmax
                        # warps finished, S_i for the next iteration must have been done, so O_i-1
                        # must have been done as well.
                        # pipeline_o_acc.producer_commit_w_index(stage)
                        # 4. release V(i-1)
                        if const_expr(stage == self.q_stage - 1):
                            pipeline_kv.consumer_release(mma_kv_release_state)
                            mma_kv_release_state.advance()
                        # End of GEMM_PV00 (P0 * V0 -> O0_partial)
                        # GEMM_QK0i (Q0 * Ki -> S0)
                        # 1. wait for Ki
                        if const_expr(stage == 0):
                            mma_kv_consumer_state.advance()
                            pipeline_kv.consumer_wait(mma_kv_consumer_state)
                            Ki_index, Ki_phase = mma_kv_consumer_state.index, mma_kv_consumer_state.phase
                        # 2. gemm
                        # Don't need to wait for the softmax warp to have finished reading the previous
                        # Si, since this gemm is scheduled after the PV gemm, which guaranteed that Si
                        # has been read and Pi has been written.
                        # sm100_utils.gemm(tiled_mma_qk, tStS[None, None, None, stage], tSrQ[None, None, None, stage], tSrK[None, None, None, Ki_index], zero_init=True)
                        sK_cur = sK[None, None, None, Ki_index]
                        if const_expr(self.uneven_kv_smem):
                            sK_cur = self.offset_kv_smem(sK_cur, Ki_index, Ki_phase)
                        # gemm_Si[stage](tCrB=tSrK[None, None, None, Ki_index], sB=sK_cur)
                        gemm_Si[stage](
                            smem_desc_start_b=sm100_desc.make_smem_desc_start_addr(sK_cur.iterator)
                        )
                        # gemm_Si[stage](tCrB=tSrK[None, None, None, Ki_index])
                        # 3. release S0 / S1
                        pipeline_s_p_o.producer_commit_w_index(stage)
                        # End of GEMM_QK0i (Q0 * Ki -> S0)
                    # 4. release Ki
                    pipeline_kv.consumer_release(mma_kv_consumer_state)
                    mma_kv_consumer_state.advance()
                    P_full_O_rescaled_phase ^= 1
                    O_should_accumulate = True
                # End of seqlen_kv loop
                # release Q0 & Q1
                for stage in cutlass.range(self.q_stage):
                    pipeline_q.consumer_release_w_index(stage)
                # GEMM_PV00 (P0 * V0 -> O0_partial), O0 needs to be accumulated in the seqlen_kv loop
                # 1. wait for V0
                pipeline_kv.consumer_wait(mma_kv_consumer_state)
                Vi_index, Vi_phase = mma_kv_consumer_state.index, mma_kv_consumer_state.phase
                tOrVi = tOrV[None, None, None, Vi_index]
                for stage in cutlass.range_constexpr(self.q_stage):
                    # 2. acquire corrected Oi_partial and Pi
                    pipeline_s_p_o.producer_acquire_w_index_phase(stage, P_full_O_rescaled_phase)
                    # 3. gemm
                    # sm100_utils.gemm(tiled_mma_pv, tOtO0, tOrP0, tOrVi, zero_init=True)
                    # gemm_Pi[stage](tCrB=tOrVi, sB=sV[None, None, None, Vi_index], zero_init=not O_should_accumulate)
                    sV_cur = sV[None, None, None, Vi_index]
                    if const_expr(self.uneven_kv_smem):
                        sV_cur = self.offset_kv_smem(sV_cur, Vi_index, Vi_phase)
                    gemm_Pi[stage](
                        tCrB=tOrVi,
                        sB=sV_cur,
                        # smem_desc_start_b=sm100_desc.make_smem_desc_start_addr(sV_cur.iterator),
                        zero_init=not O_should_accumulate,
                        mbar_ptr=pipeline_p_lastsplit.sync_object_full.get_barrier(stage) if self.split_P_arrive > 0 else None,
                        mbar_phase=P_full_O_rescaled_phase,
                    )
                    # 4. release accumulated O0_partial
                    # We do need O_full here since for the last tile, by the time the softmax warp
                    # has signaled to the correction warps, the softmax warp has just finished
                    # computing the row sum of the current tile. It does not guarantee that the 1st
                    # tile of the next work tile has been computed yet.
                    pipeline_o_acc.producer_commit_w_index(stage)
                    # End of GEMM_PV00 (P0 * V0 -> O0_partial)
                P_full_O_rescaled_phase ^= 1
                # 5. release Vi_end
                pipeline_kv.consumer_release(mma_kv_consumer_state)
                mma_kv_consumer_state.advance()
                # End of GEMM_PV1(i_end) (P1 * Vi_end -> O1)
            # Advance to next tile
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
        # End of persistent scheduler loop
        # We don't need pipeline_s_p_o.producer_tail() since there's no dangling mbarrier at the end
        # pipeline_s_p_o.producer_acquire_w_index_phase(self.q_stage - 1, P_full_O_rescaled_phase)
        # We don't need pipeline_o_acc.producer_tail() since we don't call
        # pipeline_o_acc.producer_acquire() inside the loop.
    # for both softmax0 and softmax1 warp group
    @cute.jit
    def softmax_loop(
        self,
        stage: int | Int32,
        softmax_scale_log2: Float32,
        softmax_scale: Float32,
        thr_mma_qk: cute.core.ThrMma,
        tStS: cute.Tensor,  # ((TILE_M, TILE_N), 1, 1, q_stage)
        sScale: cute.Tensor,
        mLSE: Optional[cute.Tensor],
        pipeline_s_p_o: pipeline.PipelineAsync,
        pipeline_p_lastsplit: pipeline.PipelineAsync,
        pipeline_sm_stats: pipeline.PipelineAsync,
        sm_stats_barrier: pipeline.NamedBarrier,
        pipeline_s0_s1_sequence: Optional[pipeline.PipelineAsync],
        learnable_sink: Optional[cute.Tensor],
        block_info: BlockInfo,
        num_splits: Int32,
        SeqlenInfoCls: Callable,
        AttentionMaskCls: Callable,
        TileSchedulerCls: Callable,
        aux_tensors: Optional[list] = None,
        fastdiv_mods=(None, None),
        head_divmod=None,
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
    ):
        """Compute softmax on attention scores from QK matrix multiplication.

        This method handles the softmax computation for either the first or second half of the
        attention matrix, depending on the 'stage' parameter. It calculates row-wise maximum
        and sum values needed for stable softmax computation, applies optional masking, and
        transforms raw attention scores into probability distributions.

        The implementation uses specialized memory access patterns and efficient math operations
        for computing exp(x) using exp2 functions. It also coordinates pipeline
        synchronization between MMA, correction, and sequence processing stages.

        Per work tile, the loop dispatches either to the block-sparse driver
        (``softmax_block_sparse_sm100``) or to the dense schedule: one first
        iteration with seqlen masking, then causal/local-masked iterations, then
        unmasked ones (each delegated to ``self.softmax_step``). The final row sum
        (and row max when LSE or a learnable sink is needed) is published to
        ``sScale`` in smem and signaled through ``sm_stats_barrier``.
        """
        tidx = cute.arch.thread_idx()[0] % (
            cute.arch.WARP_SIZE
            # * (len(self.softmax0_warp_ids) if stage == 0 else len(self.softmax1_warp_ids)
            * (len(self.softmax0_warp_ids))
        )
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
        cta_qk_tiler = (self.mma_tiler_qk[0] // thr_mma_qk.thr_id.shape, self.mma_tiler_qk[1])
        tSAcc = tStS[(None, None), 0, 0, stage]  # (128, 128)
        tStScale = cute.composition(tSAcc, cute.make_layout((self.m_block_size, 1)))
        tScS = thr_mma_qk.partition_C(cute.make_identity_tensor(self.mma_tiler_qk[:2]))
        tScS = tScS[(None, None), 0, 0]  # (128, 128)
        tScScale = cute.composition(tScS, cute.make_layout((self.m_block_size, 1)))
        # Width of the P tile re-expressed in FP32 lanes (P is stored as v_dtype in tmem).
        tilePlikeFP32 = self.mma_tiler_qk[1] // Float32.width * self.v_dtype.width
        tStP_layout = cute.composition(
            tSAcc.layout, cute.make_layout((self.m_block_size, tilePlikeFP32))
        )
        tStP = cute.make_tensor(tSAcc.iterator + self.tmem_s_to_p_offset, tStP_layout)
        tmem_load_atom = cute.make_copy_atom(
            tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(32)), self.qk_acc_dtype
        )
        thr_tmem_load = tcgen05.make_tmem_copy(tmem_load_atom, tSAcc).get_slice(tidx)
        tStS_t2r = thr_tmem_load.partition_S(tSAcc)  # (((32,32),1),1,4)
        tmem_store_scale_atom = cute.make_copy_atom(
            tcgen05.copy.St32x32bOp(tcgen05.copy.Repetition(1)), Float32
        )
        thr_tmem_store_scale = tcgen05.make_tmem_copy(tmem_store_scale_atom, tStScale).get_slice(
            tidx
        )
        tStScale_r2t = thr_tmem_store_scale.partition_D(tStScale)
        tmem_store_atom = cute.make_copy_atom(
            tcgen05.copy.St32x32bOp(tcgen05.copy.Repetition(16)), Float32
        )
        thr_tmem_store = tcgen05.make_tmem_copy(tmem_store_atom, tStP).get_slice(tidx)
        tStP_r2t = thr_tmem_store.partition_D(tStP)  # (((16,32),1),1,4)
        mma_si_consumer_phase = Int32(0)
        sm_stats_producer_phase = Int32(1)
        # The two softmax warp groups start on opposite initial phases of the
        # s0/s1 sequencing pipeline.
        s0_s1_sequence_phase = Int32(1 if stage == 0 else 0)
        # self.warp_scheduler_barrier_init()
        warp_idx_in_wg = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        while work_tile.is_valid_tile:
            m_block, head_idx, batch_idx, split_idx = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block, split_idx, num_splits)
            mask = AttentionMaskCls(seqlen)
            shared_mask_kwargs = dict(
                m_block=(self.q_stage * m_block + stage) * self.cta_group_size,
                thr_mma=thr_mma_qk,
                thr_tmem_load=thr_tmem_load,
                mask_causal=self.is_causal,
                mask_local=self.is_local,
                batch_idx=batch_idx,
                head_idx=head_idx,
                aux_tensors=aux_tensors,
            )
            # Recompute fastdiv_mods if necessary
            recompute_fastdiv_mods_q = cutlass.const_expr(
                aux_tensors is not None and (seqlen.has_cu_seqlens_q or seqlen.has_seqused_q)
            )
            recompute_fastdiv_mods_k = cutlass.const_expr(
                aux_tensors is not None and (seqlen.has_cu_seqlens_k or seqlen.has_seqused_k)
            )
            if cutlass.const_expr(fastdiv_mods is not None):
                seqlen_q_divmod, seqlen_k_divmod = fastdiv_mods
                fastdiv_mods = (
                    seqlen_q_divmod
                    if not recompute_fastdiv_mods_q
                    else FastDivmodDivisor(seqlen.seqlen_q),
                    seqlen_k_divmod
                    if not recompute_fastdiv_mods_k
                    else FastDivmodDivisor(seqlen.seqlen_k),
                )
            mask_mod = self.mask_mod if const_expr(self.mask_mod is not None) else None
            mask_fn = partial(
                mask.apply_mask_sm100,
                mask_mod=mask_mod,
                fastdiv_mods=fastdiv_mods,
                head_divmod=head_divmod,
                **shared_mask_kwargs,
            )
            if const_expr(self.use_block_sparsity):
                # Full blocks dont need mask_mod
                mask_fn_none = partial(
                    mask.apply_mask_sm100,
                    mask_mod=None,
                    fastdiv_mods=fastdiv_mods,
                    head_divmod=head_divmod,
                    **shared_mask_kwargs,
                )
            else:
                mask_fn_none = None
            softmax = SoftmaxSm100.create(
                softmax_scale_log2,
                rescale_threshold=8.0 if const_expr(self.q_dtype.width == 16) else 0.0,
                softmax_scale=softmax_scale,
            )
            softmax.reset()
            if const_expr(self.use_block_sparsity):
                tile_block_count = get_total_block_count(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    m_block,
                    self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                    self.q_subtile_factor if self.q_subtile_factor is not None else 1,
                )
                has_work = tile_block_count > Int32(0)
            else:
                tile_block_count = n_block_max - n_block_min
                has_work = const_expr(not self.is_split_kv) or tile_block_count > Int32(0)
            softmax_step = partial(
                self.softmax_step,
                softmax=softmax,
                thr_mma_qk=thr_mma_qk,
                pipeline_s_p_o=pipeline_s_p_o,
                pipeline_p_lastsplit=pipeline_p_lastsplit,
                pipeline_sm_stats=pipeline_sm_stats,
                sm_stats_barrier=sm_stats_barrier,
                pipeline_s0_s1_sequence=pipeline_s0_s1_sequence,
                thr_tmem_load=thr_tmem_load,
                thr_tmem_store=thr_tmem_store,
                thr_tmem_store_scale=thr_tmem_store_scale,
                tStS_t2r=tStS_t2r,
                tStScale_r2t=tStScale_r2t,
                tStP_r2t=tStP_r2t,
                sScale=sScale,
                stage=stage,
                batch_idx=batch_idx,
                head_idx=head_idx,
                m_block=(self.q_stage * m_block + stage) * self.cta_group_size,
                seqlen=seqlen,
                aux_tensors=aux_tensors,
                fastdiv_mods=fastdiv_mods,
                head_divmod=head_divmod,
            )
            if const_expr(self.use_block_sparsity) or has_work:
                # See block_sparse_utils.py NOTE [SM100 block-sparse empty tiles: mbarrier contract].
                pipeline_sm_stats.producer_acquire_w_index_phase(stage, sm_stats_producer_phase)
                sm_stats_producer_phase ^= 1
            # Block sparse or dense iteration
            if const_expr(self.use_block_sparsity):
                # When aux_tensors exist, Q indices beyond seqlen_q must be wrapped to avoid
                # OOB aux_tensor access. Only edge tiles (where m_tile_end > seqlen_q) need this.
                if const_expr(aux_tensors is not None):
                    m_tile_end = ((self.q_stage * m_block + stage + 1) * self.cta_group_size) * self.m_block_size
                    check_m_boundary = m_tile_end > seqlen.seqlen_q
                else:
                    check_m_boundary = False
                (
                    mma_si_consumer_phase,
                    sm_stats_producer_phase,
                    s0_s1_sequence_phase,
                    empty_tile,
                ) = softmax_block_sparse_sm100(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    m_block,
                    softmax_step,
                    mask_fn,
                    mask_fn_none,
                    mma_si_consumer_phase,
                    sm_stats_producer_phase,
                    s0_s1_sequence_phase,
                    pipeline_sm_stats,
                    sm_stats_barrier,
                    self.q_stage,
                    Int32(stage),
                    check_m_boundary,
                    self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                    self.q_subtile_factor if self.q_subtile_factor is not None else 1,
                )
                # Only non-empty tiles publish row stats and signal the barrier
                # (empty tiles follow the mbarrier contract referenced above).
                if not empty_tile:
                    sScale[tidx + stage * self.m_block_size] = softmax.row_sum[0]
                    if const_expr(mLSE is not None or learnable_sink is not None):
                        sScale[
                            tidx + stage * self.m_block_size + self.q_stage * self.m_block_size
                        ] = softmax.row_max[0]
                    # if tidx == 0:
                    # cute.printf("softmax row sum stage %d: %f, row_max = %f\n", stage, softmax.row_sum[0], softmax.row_max[0])
                    # See block_sparse_utils.py NOTE [SM100 block-sparse empty tiles: mbarrier contract].
                    # pipeline_sm_stats.producer_commit_w_index(stage)
                    sm_stats_barrier.arrive_w_index(index=stage * 4 + warp_idx)
                    # if tidx == 0: cute.printf("softmax row sum stage %d: %f\n", stage, softmax.row_sum[0])
            else:
                if const_expr(not self.is_split_kv) or tile_block_count > Int32(0):
                    mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase = softmax_step(
                        mma_si_consumer_phase,
                        sm_stats_producer_phase,
                        s0_s1_sequence_phase,
                        n_block_max - 1,
                        is_first=True,
                        mask_fn=partial(mask_fn, mask_seqlen=True),
                    )
                    n_block_max -= 1
                    # Next couple of iterations with causal masking
                    if const_expr(self.is_causal or self.is_local):
                        n_block_min_causal_local_mask = block_info.get_n_block_min_causal_local_mask(
                            seqlen, m_block, n_block_min
                        )
                        for n_tile in cutlass.range(n_block_max - n_block_min_causal_local_mask, unroll=1):
                            n_block = n_block_max - 1 - n_tile
                            mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase = (
                                softmax_step(
                                    mma_si_consumer_phase,
                                    sm_stats_producer_phase,
                                    s0_s1_sequence_phase,
                                    n_block,
                                    mask_fn=partial(mask_fn, mask_seqlen=False),
                                )
                            )
                        n_block_max = cutlass.min(n_block_max, n_block_min_causal_local_mask)
                    # The remaining iterations have no masking (but may still need mask_mod)
                    n_block_min_before_local_mask = block_info.get_n_block_min_before_local_mask(
                        seqlen, m_block, n_block_min
                    )
                    for n_tile in cutlass.range(n_block_max - n_block_min_before_local_mask, unroll=1):
                        n_block = n_block_max - n_tile - 1
                        if const_expr(self.mask_mod is not None):
                            mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase = softmax_step(
                                mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase, n_block,
                                mask_fn=partial(mask_fn, mask_seqlen=False),
                            )
                        else:
                            mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase = softmax_step(
                                mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase, n_block,
                            )
                    # Separate iterations with local masking on the left
                    if const_expr(self.is_local and block_info.window_size_left is not None):
                        n_block_max = cutlass.min(n_block_max, n_block_min_before_local_mask)
                        for n_tile in cutlass.range(0, n_block_max - n_block_min, unroll=1):
                            n_block = n_block_max - 1 - n_tile
                            mma_si_consumer_phase, sm_stats_producer_phase, s0_s1_sequence_phase = (
                                softmax_step(
                                    mma_si_consumer_phase,
                                    sm_stats_producer_phase,
                                    s0_s1_sequence_phase,
                                    n_block,
                                    mask_fn=partial(mask_fn, mask_seqlen=False),
                                )
                            )
                    # Now that we no longer already have the 1st iteration, need mask_seqlen=True here
                # Dense path always writes scale / signals
                sScale[tidx + stage * self.m_block_size] = softmax.row_sum[0]
                if const_expr(mLSE is not None or learnable_sink is not None):
                    sScale[
                        tidx + stage * self.m_block_size + self.q_stage * self.m_block_size
                    ] = softmax.row_max[0]
                # pipeline_sm_stats.producer_commit_w_index(stage)
                sm_stats_barrier.arrive_w_index(index=stage * 4 + warp_idx)
            # # Write LSE to gmem
            # if const_expr(mLSE is not None):
            #     acc_O_mn_row_is_zero_or_nan = softmax.row_sum[0] == 0.0 or softmax.row_sum[0] != softmax.row_sum[0]
            #     scale = (
            #         cute.arch.rcp_approx(softmax.row_sum[0] if not acc_O_mn_row_is_zero_or_nan else 1.0)
            #     )
            #     LN2 = math.log(2.0)
            #     lse = (
            #         (softmax.row_max[0] * softmax.scale_log2 + cute.math.log2(softmax.row_sum[0], fastmath=True)) * LN2
            #         if not acc_O_mn_row_is_zero_or_nan else -Float32.inf
            #     )
            #     if const_expr(not seqlen.has_cu_seqlens_q):
            #         mLSE_cur = mLSE[None, head_idx, batch_idx]
            #     else:
            #         mLSE_cur = cute.domain_offset((seqlen.offset_q,), mLSE[None, head_idx])
            #     gLSE = cute.local_tile(mLSE_cur, (self.m_block_size,), (m_block * 2 + stage,))
            #     if tidx < seqlen.seqlen_q - (m_block * 2 + stage) * self.m_block_size:
            #         gLSE[tidx] = lse
            # Advance to next tile
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
        # End of persistent scheduler loop
        # This is equivalent to pipeline_sm_stats.producer_tail
        pipeline_sm_stats.producer_acquire_w_index_phase(stage, sm_stats_producer_phase)
        # This is equivalent to pipeline_s0_s1.producer_tail
        if const_expr(self.s0_s1_barrier):
            if stage == 0:
                pipeline_s0_s1_sequence.sync_object_full.wait(stage, s0_s1_sequence_phase)
@cute.jit
def softmax_step(
    self,
    mma_si_consumer_phase: Int32,
    sm_stats_producer_phase: Int32,
    s0_s1_sequence_phase: Int32,
    n_block: Int32,
    softmax: SoftmaxSm100,
    thr_mma_qk: cute.core.ThrMma,
    pipeline_s_p_o: pipeline.PipelineAsync,
    pipeline_p_lastsplit: pipeline.PipelineAsync,
    pipeline_sm_stats: pipeline.PipelineAsync,
    sm_stats_barrier: pipeline.NamedBarrier,
    pipeline_s0_s1_sequence: Optional[pipeline.PipelineAsync],
    thr_tmem_load: cute.CopyAtom,
    thr_tmem_store: cute.CopyAtom,
    thr_tmem_store_scale: cute.CopyAtom,
    tStS_t2r: cute.Tensor,
    tStScale_r2t: cute.Tensor,
    tStP_r2t: cute.Tensor,
    sScale: cute.Tensor,
    stage: int | Int32,
    batch_idx: Int32,
    head_idx: Int32,
    m_block: Int32,
    seqlen,
    aux_tensors: Optional[list] = None,
    fastdiv_mods=(None, None),
    head_divmod=None,
    mask_fn: Optional[Callable] = None,
    is_first: bool = False,
) -> Tuple[cute.Int32, cute.Int32, cute.Int32]:
    """Perform a single step of the softmax computation on a block of attention scores.
    This method processes one block of the attention matrix, computing numerically stable
    softmax by first finding the row maximum, subtracting it from all elements, applying
    exponential function, and then normalizing by the sum of exponentials. It also handles
    optional masking of attention scores.
    The method involves several key operations:
    1. Loading attention scores from tensor memory
    2. Applying optional masking based on position
    3. Computing row-wise maximum values for numerical stability
    4. Transforming scores using exp2(x*scale - max*scale)
    5. Computing row sums for normalization
    6. Coordinating pipeline synchronization between different processing stages
    Returns the three pipeline phases (S_i consumer, sm-stats producer, S0/S1 sequence),
    each flipped ready for the next call.
    """
    warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
    # Number of Float32 "columns" that one row-block of P occupies once converted
    # to the (narrower) v_dtype and viewed as packed 32-bit words.
    tilePlikeFP32 = self.mma_tiler_qk[1] // Float32.width * self.v_dtype.width
    tScS = thr_mma_qk.partition_C(cute.make_identity_tensor(self.mma_tiler_qk[:2]))
    tScS = tScS[(None, None), 0, 0]  # (128, 128)
    # tScScale = cute.composition(tScS, cute.make_layout((self.m_block_size, 1)))
    cta_qk_tiler = (self.mma_tiler_qk[0] // thr_mma_qk.thr_id.shape, self.mma_tiler_qk[1])
    tScS_shape = cta_qk_tiler  # (128, 128)
    tScP_shape = (tScS_shape[0], tilePlikeFP32)  # (128, 64)
    # Wait for Si
    pipeline_s_p_o.consumer_wait_w_index_phase(stage, mma_si_consumer_phase)
    # Pull the raw S scores from tmem into registers.
    tSrS_t2r = cute.make_fragment(thr_tmem_load.partition_D(tScS).shape, self.qk_acc_dtype)
    cute.copy(thr_tmem_load, tStS_t2r, tSrS_t2r)
    # tSrS_t2r = copy_utils.load_t2r(thr_tmem_load, tScS_shape, tStS_t2r)
    if cutlass.const_expr(self.score_mod is not None):
        self.apply_score_mod(
            tSrS_t2r,
            thr_tmem_load,
            thr_mma_qk,
            batch_idx,
            head_idx,
            m_block,
            n_block,
            softmax,
            seqlen,
            aux_tensors,
            fastdiv_mods,
            head_divmod,
        )
    if const_expr(mask_fn is not None):
        mask_fn(tSrS_t2r, n_block=n_block)
    # row_max is the running max; acc_scale is the rescale factor for the previous
    # partial output (only meaningful when this isn't the first block).
    row_max, acc_scale = softmax.update_row_max(tSrS_t2r.load(), is_first)
    if const_expr(not is_first):
        # tSrScale_r2t = cute.make_fragment(thr_tmem_store_scale.partition_S(tScScale).shape, Float32)
        # tSrScale_r2t[0] = acc_scale
        # cute.copy(thr_tmem_store_scale, tSrScale_r2t, tStScale_r2t)
        # cute.arch.fence_view_async_tmem_store()
        # Publish the rescale factor through smem for the correction warps.
        thread_idx = thr_tmem_load.thr_idx
        sScale[thread_idx + stage * self.m_block_size] = acc_scale
        # if thread_idx == 0: cute.printf("softmax acc_scale stage %d: %f, row_max = %f\n", stage, acc_scale, row_max)
    # Notify correction wg that row_max is ready
    # pipeline_sm_stats.producer_commit_w_index(stage)
    sm_stats_barrier.arrive_w_index(index=stage * 4 + warp_idx)
    # if thread_idx == 0 and stage == 0: cute.print_tensor(tSrS_t2r)
    softmax.scale_subtract_rowmax(tSrS_t2r, row_max)
    # Sequence barrier wait
    if const_expr(self.s0_s1_barrier):
        pipeline_s0_s1_sequence.sync_object_full.wait(stage, s0_s1_sequence_phase)
    # P is produced in q_dtype but stored through tmem as packed Float32 words:
    # tSrP_r2t aliases tSrP_r2t_f32's storage reinterpreted as q_dtype.
    tSrP_r2t_f32 = cute.make_fragment(
        thr_tmem_store.partition_S(cute.make_identity_tensor(tScP_shape)).shape, Float32
    )
    tSrP_r2t = cute.make_tensor(
        cute.recast_ptr(tSrP_r2t_f32.iterator, dtype=self.q_dtype), tSrS_t2r.layout
    )
    # softmax.scale_apply_exp2_convert(tSrS_t2r, row_max, tSrP_r2t)
    softmax.apply_exp2_convert(
        tSrS_t2r,
        tSrP_r2t,
        ex2_emu_freq=self.ex2_emu_freq if const_expr(mask_fn is None) else 0,
        ex2_emu_start_frg=self.ex2_emu_start_frg,
    )
    # Sequence barrier arrive
    if const_expr(self.s0_s1_barrier):
        pipeline_s0_s1_sequence.sync_object_full.arrive(1 - stage, dst=None)
    # print(tSrP_r2t_f32, tStP_r2t)
    # cute.copy(thr_tmem_store, tSrP_r2t_f32, tStP_r2t)
    # Store P back to tmem chunk by chunk so the MMA warp can start on the first
    # half of P before the second half is written (when split_P_arrive > 0).
    for i in cutlass.range_constexpr(cute.size(tStP_r2t.shape[2])):
        cute.copy(thr_tmem_store, tSrP_r2t_f32[None, None, i], tStP_r2t[None, None, i])
        if const_expr(self.split_P_arrive > 0):
            split_P_arrive_idx = cute.size(tStP_r2t.shape[2]) * self.split_P_arrive // self.n_block_size
            if const_expr(i + 1 == split_P_arrive_idx):
                # Notify mma warp that the 1st half of P is ready
                cute.arch.fence_view_async_tmem_store()
                pipeline_s_p_o.consumer_release_w_index(stage)
    # Notify mma warp that the 2nd half of P is ready
    cute.arch.fence_view_async_tmem_store()
    if const_expr(self.split_P_arrive > 0):
        cute.arch.sync_warp()
        with cute.arch.elect_one():
            pipeline_p_lastsplit.producer_commit_w_index(stage)
    else:
        pipeline_s_p_o.consumer_release_w_index(stage)
    pipeline_sm_stats.producer_acquire_w_index_phase(stage, sm_stats_producer_phase)
    softmax.update_row_sum(tSrS_t2r.load(), acc_scale, is_first)
    # acc_scale = cute.math.exp2(acc_scale_, fastmath=True)
    return mma_si_consumer_phase ^ 1, sm_stats_producer_phase ^ 1, s0_s1_sequence_phase ^ 1
@cute.jit
def correction_loop(
    self,
    thr_mma_qk: cute.core.ThrMma,
    thr_mma_pv: cute.core.ThrMma,
    tStS: cute.Tensor,
    tOtO: cute.Tensor,
    sScale: cute.Tensor,
    mO: cute.Tensor,
    mLSE: cute.Tensor,
    sO: cute.Tensor,
    pipeline_s_p_o: pipeline.PipelineAsync,
    pipeline_o_acc: pipeline.PipelineAsync,
    pipeline_sm_stats: pipeline.PipelineAsync,
    sm_stats_barrier: pipeline.NamedBarrier,
    pipeline_o_epi: pipeline.PipelineAsync,
    learnable_sink: Optional[cute.Tensor],
    gmem_tiled_copy_O: cute.TiledCopy,
    tma_atom_O: cute.CopyAtom,
    softmax_scale_log2: Float32,
    block_info: BlockInfo,
    num_splits: Int32,
    SeqlenInfoCls: Callable,
    TileSchedulerCls: Callable,
    blocksparse_tensors: Optional[BlockSparseTensors] = None,
):
    """Persistent loop run by the correction warps.

    For each work tile, consumes the per-block softmax statistics published by the
    softmax warps (through sScale / sm_stats_barrier), rescales the partial output
    accumulator in tmem when the running row-max changed, then applies the final
    1/row_sum normalization, writes O to smem/gmem via correction_epilogue, and
    optionally writes LSE. Handles the empty-tile paths for split-KV and block
    sparsity, and keeps all pipeline phases in lock-step with the producers.
    """
    tidx = cute.arch.thread_idx()[0] % (cute.arch.WARP_SIZE * len(self.correction_warp_ids))
    warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
    mma_tile_coord_v = thr_mma_qk.thr_idx
    tScS = thr_mma_qk.partition_C(cute.make_identity_tensor(self.mma_tiler_qk[:2]))
    tStScale_layout = cute.composition(tStS.layout, cute.make_layout((self.m_block_size, 1)))
    tStScales = tuple(
        cute.make_tensor(tStS.iterator + self.tmem_vec_offset[stage], tStScale_layout)
        for stage in range(self.q_stage)
    )
    tScScale = cute.composition(tScS, cute.make_layout((self.m_block_size, 1)))
    tmem_load_v_atom = cute.make_copy_atom(
        tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(1)), self.qk_acc_dtype
    )
    thr_tmem_load_vec = tcgen05.make_tmem_copy(tmem_load_v_atom, tStScales[0]).get_slice(tidx)
    tStScales_t2r = [thr_tmem_load_vec.partition_S(tStScales[stage]) for stage in range(self.q_stage)]
    tSrScale_t2r_shape = thr_tmem_load_vec.partition_D(tScScale).shape
    # First iter: no correction is required
    # Notify mma warp that O has been rescaled
    for stage in cutlass.range(self.q_stage):
        pipeline_s_p_o.consumer_release_w_index(stage)
    sm_stats_consumer_phase = Int32(0)
    o_corr_consumer_phase = Int32(0)
    corr_epi_producer_phase = Int32(1)
    tile_scheduler = TileSchedulerCls()
    work_tile = tile_scheduler.initial_work_tile_info()
    while work_tile.is_valid_tile:
        m_block, head_idx, batch_idx, split_idx = work_tile.tile_idx
        seqlen = SeqlenInfoCls(batch_idx)
        n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block, split_idx, num_splits)
        if const_expr(self.is_split_kv):
            mO_cur = seqlen.offset_batch_Q(mO, batch_idx, dim=3)[None, None, head_idx, split_idx]
        else:
            mO_cur = seqlen.offset_batch_Q(mO, batch_idx, dim=3)[None, None, head_idx]
        tiler_gO = ((self.mma_tiler_pv[0] * self.q_stage), self.head_dim_v_padded)
        gO = cute.local_tile(mO_cur, tiler_gO, (m_block, 0))  # (128 * 2, 128)
        gO = layout_utils.select(
            cute.flat_divide(gO, (self.mma_tiler_pv[0],)), mode=[0, 2, 1]
        )  # (128, 128, 2)
        gO = cute.flat_divide(gO, (self.mma_tiler_pv[0] // self.cta_group_size,))[None, mma_tile_coord_v, None, None]
        # Default LSE to -inf for invalid split_idx tiles
        stats = [(0.0, -Float32.inf if const_expr(mLSE is not None or learnable_sink is not None) else None, True)] * self.q_stage
        if const_expr(self.use_block_sparsity):
            total_block_count = get_total_block_count(
                blocksparse_tensors,
                batch_idx,
                head_idx,
                m_block,
                self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                self.q_subtile_factor if self.q_subtile_factor is not None else 1,
            )
            has_work = total_block_count > Int32(0)
        else:
            total_block_count = n_block_max - n_block_min
            has_work = const_expr(not self.is_split_kv) or total_block_count > Int32(0)
        if has_work:
            # Ignore first signal from softmax as no correction is required
            # pipeline_sm_stats.consumer_wait_w_index_phase(0, sm_stats_consumer_phase)
            sm_stats_barrier.arrive_and_wait_w_index(index=0 * 4 + warp_idx)
            pipeline_sm_stats.consumer_release_w_index(0)
            if const_expr(self.q_stage == 2):
                # pipeline_sm_stats.consumer_wait_w_index_phase(1, sm_stats_consumer_phase)
                sm_stats_barrier.arrive_and_wait_w_index(index=1 * 4 + warp_idx)
            sm_stats_consumer_phase ^= 1
            tSrScale_t2r = cute.make_fragment(tSrScale_t2r_shape, Float32)
            # One rescale round per remaining KV block (the first block needs none).
            for i in cutlass.range(total_block_count - 1, unroll=1):
                for stage in cutlass.range_constexpr(self.q_stage):
                    # wait for S0 / S1
                    # pipeline_sm_stats.consumer_wait_w_index_phase(stage, sm_stats_consumer_phase)
                    sm_stats_barrier.arrive_and_wait_w_index(index=stage * 4 + warp_idx)
                    # cute.copy(tiled_tmem_load_vec, tStScales_t2r[stage], tSrScale_t2r)
                    # cute.arch.fence_view_async_tmem_load()
                    # scale = tSrScale_t2r[0]
                    scale = sScale[tidx + stage * self.m_block_size]
                    # Skip the tmem round-trip when no lane in the warp actually
                    # needs rescaling (scale == 1.0 means row_max was unchanged).
                    should_rescale = cute.arch.vote_ballot_sync(scale < 1.0) != 0
                    # should_rescale = True
                    # if tidx == 0: cute.printf("Correction scale i = %d, for stage %d: %f, should_rescale = %d\n", i, stage, scale, should_rescale)
                    # Don't need O_full anymore, since by the time softmax has signaled the correction
                    # warps, S_i must have been done, so O_i-1 must have been done as well.
                    # pipeline_o_acc.consumer_wait_w_index_phase(stage, o_corr_consumer_phase)
                    if should_rescale:
                        self.correction_rescale(thr_mma_pv, tOtO[None, None, None, stage], tidx, scale)
                    # Notify mma warp that O has been rescaled
                    pipeline_s_p_o.consumer_release_w_index(stage)
                    pipeline_sm_stats.consumer_release_w_index(self.q_stage - 1 - stage)
                sm_stats_consumer_phase ^= 1
                # o_corr_consumer_phase ^= 1
            if const_expr(self.q_stage == 2):
                pipeline_sm_stats.consumer_release_w_index(1)
            # End of seqlen_corr_loop_steps
            # Even in the case of self.overlap_sO_sQ, we can write to stage 0 of sO without
            # additional sync because the MMA in the top half must have been done.
            # Similarly we can write to stage 1 of sO without additional sync.
            learnable_sink_val = [None] * self.q_stage
            if const_expr(learnable_sink is not None):
                if const_expr(not self.pack_gqa):
                    sink_val = Float32(learnable_sink[head_idx])
                    learnable_sink_val = [sink_val] * self.q_stage
                else:  # Each thread might have a different sink value due to different q_head
                    for stage in cutlass.range_constexpr(self.q_stage):
                        q_head_idx = (
                            ((m_block * self.q_stage + stage) * self.cta_group_size + mma_tile_coord_v) * self.m_block_size + tidx
                        ) % self.qhead_per_kvhead + head_idx * self.qhead_per_kvhead
                        learnable_sink_val[stage] = Float32(learnable_sink[q_head_idx])
            # Final pass: fold in the sink, normalize O by 1/row_sum, and write out.
            for stage in cutlass.range_constexpr(self.q_stage):
                # pipeline_sm_stats.consumer_wait_w_index_phase(stage, sm_stats_consumer_phase)
                sm_stats_barrier.arrive_and_wait_w_index(index=stage * 4 + warp_idx)
                # cute.copy(tiled_tmem_load_vec, tStScales_t2r[stage], tSrScale_t2r)
                # cute.arch.fence_view_async_tmem_load()
                # scale = tSrScale_t2r[0]
                row_sum = sScale[tidx + stage * self.m_block_size]
                if const_expr(mLSE is not None or learnable_sink is not None):
                    row_max = sScale[tidx + stage * self.m_block_size + self.q_stage * self.m_block_size]
                else:
                    row_max = None
                pipeline_sm_stats.consumer_release_w_index(stage)
                if const_expr(learnable_sink is not None):
                    LOG2_E = math.log2(math.e)
                    sink_val = learnable_sink_val[stage]
                    if const_expr(not self.is_split_kv) or split_idx == 0:
                        if row_max == -Float32.inf:
                            # It's possible to have an empty row with splitKV.
                            row_max = sink_val * (LOG2_E / softmax_scale_log2)
                            row_sum = Float32(1.0)
                        else:
                            row_sum += cute.math.exp2(
                                sink_val * LOG2_E - row_max * softmax_scale_log2, fastmath=True
                            )
                # row_sum != row_sum is the NaN check.
                acc_O_mn_row_is_zero_or_nan = row_sum == 0.0 or row_sum != row_sum
                stats[stage] = (row_sum, row_max, acc_O_mn_row_is_zero_or_nan)
                scale = cute.arch.rcp_approx(row_sum if not acc_O_mn_row_is_zero_or_nan else 1.0)
                # Wait for the last O to be ready from the MMA warp
                pipeline_o_acc.consumer_wait_w_index_phase(stage, o_corr_consumer_phase)
                if const_expr(not self.use_correction_warps_for_epi):
                    pipeline_o_epi.producer_acquire_w_index_phase(stage, corr_epi_producer_phase)
                self.correction_epilogue(
                    thr_mma_pv,
                    tOtO[None, None, None, stage],
                    tidx,
                    stage,
                    m_block,
                    seqlen.seqlen_q,
                    scale,
                    sO[None, None, stage],
                    mO_cur,
                    gO[None, None, stage],
                    gmem_tiled_copy_O,
                )
                # Signal for the next work tile that O buffers in tmem are already read, so
                # mma warp can write to them
                pipeline_s_p_o.consumer_release_w_index(stage)
                if const_expr(not self.use_correction_warps_for_epi):
                    pipeline_o_epi.producer_commit_w_index(stage)
                # if tidx == 0: cute.printf("Correction final scale for stage %d: %f\n", stage, scale)
            o_corr_consumer_phase ^= 1
            sm_stats_consumer_phase ^= 1
            corr_epi_producer_phase ^= 1
        else:
            gmem_tiled_copy_O_for_empty_tile = None
            if const_expr(self.use_correction_warps_for_epi):
                gmem_tiled_copy_O_for_empty_tile = gmem_tiled_copy_O
            if const_expr(self.use_block_sparsity):
                (
                    sm_stats_consumer_phase,
                    o_corr_consumer_phase,
                    corr_epi_producer_phase,
                ) = handle_block_sparse_empty_tile_correction_sm100(
                    tidx,
                    self.q_stage,
                    self.m_block_size,
                    self.qhead_per_kvhead,
                    self.pack_gqa,
                    self.is_split_kv,
                    learnable_sink,
                    mLSE,
                    seqlen,
                    m_block,
                    head_idx,
                    batch_idx,
                    split_idx,
                    sScale,
                    stats,
                    self.correction_epilogue,
                    thr_mma_pv,
                    tOtO,
                    sO,
                    pipeline_sm_stats,
                    sm_stats_barrier,
                    pipeline_o_epi,
                    sm_stats_consumer_phase,
                    o_corr_consumer_phase,
                    corr_epi_producer_phase,
                    softmax_scale_log2,
                    mO_cur,
                    gO,
                    gmem_tiled_copy_O_for_empty_tile,
                )
        if const_expr(mLSE is not None):
            if const_expr(not seqlen.has_cu_seqlens_q):
                if const_expr(self.is_split_kv):
                    mLSE_cur = mLSE[None, head_idx, batch_idx, split_idx]
                else:
                    mLSE_cur = mLSE[None, head_idx, batch_idx]
            else:
                offset = (
                    seqlen.offset_q if const_expr(not self.pack_gqa) else (0, seqlen.offset_q)
                )
                if const_expr(self.is_split_kv):
                    mLSE_cur = cute.domain_offset((offset,), mLSE[None, head_idx, split_idx])
                else:
                    mLSE_cur = cute.domain_offset((offset,), mLSE[None, head_idx])
            for stage in cutlass.range_constexpr(self.q_stage):
                m_tile_idx = (m_block * self.q_stage + stage) * self.cta_group_size + mma_tile_coord_v
                gLSE = cute.local_tile(mLSE_cur, (self.m_block_size,), (m_tile_idx,))
                row_sum, row_max, acc_O_mn_row_is_zero_or_nan = stats[stage]
                # if tidx == 0 and stage <= 1:
                #     cute.printf("row_sum = {}, row_max = {}, acc_O_mn_row_is_zero_or_nan = {}\n", row_sum, row_max, acc_O_mn_row_is_zero_or_nan)
                LN2 = math.log(2.0)
                # lse = row_max * softmax_scale + log(row_sum), computed in base-2 then
                # converted back to natural log via LN2.
                lse = (
                    (row_max * softmax_scale_log2 + cute.math.log2(row_sum, fastmath=True)) * LN2
                    if not acc_O_mn_row_is_zero_or_nan
                    else -Float32.inf
                )
                seqlen_q = (
                    seqlen.seqlen_q
                    if const_expr(not self.pack_gqa)
                    else seqlen.seqlen_q * self.qhead_per_kvhead
                )
                if tidx < seqlen_q - m_tile_idx * self.m_block_size:
                    # This actually just works with PackGQA too
                    gLSE[tidx] = lse
        # Advance to next tile
        tile_scheduler.advance_to_next_work()
        work_tile = tile_scheduler.get_current_work()
    # End of persistent scheduler loop
    # This is equivalent to pipeline_o_epi.consumer_tail() for the correction warps
    if const_expr(not self.use_correction_warps_for_epi):
        pipeline_o_epi.producer_acquire_w_index_phase(self.q_stage - 1, corr_epi_producer_phase)
@cute.jit
def correction_rescale(
    self,
    thr_mma: cute.core.ThrMma,
    tOtO: cute.Tensor,
    tidx: Int32,
    scale: Float32,
):
    """Rescale intermediate attention results based on softmax normalization factor.
    This method performs a crucial correction step in the attention computation pipeline.
    When processing attention in blocks, the softmax normalization factors may change
    as new blocks are processed. This method rescales previously computed partial
    output values to account for updated normalization factors.
    The implementation uses efficient tensor memory operations to:
    1. Load existing partial attention output from tensor memory
    2. Apply the scaling factor to all elements
    3. Store the rescaled results back to tensor memory

    :param thr_mma: thread MMA used to partition the accumulator
    :param tOtO: tmem tensor holding the partial O accumulator for one stage
    :param tidx: thread index within the correction warps
    :param scale: multiplicative rescale factor (exp2 of the row-max delta)
    """
    tOcO = thr_mma.partition_C(cute.make_identity_tensor(self.mma_tiler_pv[:2]))
    corr_tile_size = 16  # tuneable parameter
    tmem_load_atom = cute.make_copy_atom(
        tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(corr_tile_size)), self.pv_acc_dtype
    )
    tmem_store_atom = cute.make_copy_atom(
        tcgen05.copy.St32x32bOp(tcgen05.copy.Repetition(corr_tile_size)),
        self.pv_acc_dtype,
    )
    tOtO_i = cute.composition(tOtO, cute.make_layout((self.m_block_size, corr_tile_size)))
    tOcO_i = cute.composition(tOcO, cute.make_layout((self.m_block_size, corr_tile_size)))
    thr_tmem_load = tcgen05.make_tmem_copy(tmem_load_atom, tOtO_i).get_slice(tidx)
    thr_tmem_store = tcgen05.make_tmem_copy(tmem_store_atom, tOtO_i).get_slice(tidx)
    tOtO_t2r = thr_tmem_load.partition_S(tOtO_i)
    tOrO_t2r_shape = thr_tmem_load.partition_D(tOcO_i).shape
    tOtO_r2t = thr_tmem_store.partition_D(tOtO_i)
    frg_count = self.head_dim_v_padded // corr_tile_size
    # Process head_dim in corr_tile_size-wide slices: tmem -> registers, scale
    # (packed f32x2 multiplies), registers -> tmem. A fresh fragment is made per
    # slice; the previous code also allocated an unused (shape, frg_count)
    # fragment up front, which was immediately shadowed — that dead allocation
    # has been removed.
    for i in cutlass.range_constexpr(frg_count):
        tOrO_frg = cute.make_fragment(tOrO_t2r_shape, self.pv_acc_dtype)
        tOtO_t2r_i = cute.make_tensor(tOtO_t2r.iterator + i * corr_tile_size, tOtO_t2r.layout)
        cute.copy(thr_tmem_load, tOtO_t2r_i, tOrO_frg)
        for j in cutlass.range(0, cute.size(tOrO_frg), 2, unroll_full=True):
            tOrO_frg[j], tOrO_frg[j + 1] = cute.arch.mul_packed_f32x2(
                (tOrO_frg[j], tOrO_frg[j + 1]), (scale, scale)
            )
        tOtO_r2t_i = cute.make_tensor(tOtO_r2t.iterator + i * corr_tile_size, tOtO_r2t.layout)
        cute.copy(thr_tmem_store, tOrO_frg, tOtO_r2t_i)
    # Make the tmem stores visible before signaling downstream consumers.
    cute.arch.fence_view_async_tmem_store()
@cute.jit
def correction_epilogue(
    self,
    thr_mma: cute.core.ThrMma,
    tOtO: cute.Tensor,
    tidx: Int32,
    stage: Int32,
    m_block: Int32,
    seqlen_q: Int32,
    scale: Float32,
    sO: cute.Tensor,
    mO_cur: Optional[cute.Tensor] = None,
    gO: Optional[cute.Tensor] = None,
    gmem_tiled_copy_O: Optional[cute.TiledCopy] = None,
):
    """Apply final scaling and transformation to attention output before writing to global memory.
    This correction_epilogue function handles the final processing step for attention output values.
    It applies a scaling factor to the accumulated attention results and prepares the
    data for efficient transfer back to global memory.
    The method performs:
    1. Loading of accumulated attention results from tensor memory
    2. Application of the final output scaling factor
    3. Type conversion if necessary (typically from higher precision accumulator to output precision)
    4. Reorganization of data for optimal memory access patterns
    5. Preparation for efficient TMA store operations
    :param thr_mma: Thread MMA operation for the computation
    :type thr_mma: cute.core.ThrMma
    :param tOtO: Tensor containing accumulated attention output
    :type tOtO: cute.Tensor
    :param scale: Final scaling factor to apply to the output
    :type scale: Float32
    :param sO: Shared memory tensor for the final output
    :type sO: cute.Tensor
    :param mO_cur: global-memory O view; only needed when the correction warps
        themselves perform the gmem store (use_correction_warps_for_epi)
    :param gO: gmem tile of O for this stage; same condition as mO_cur
    :param gmem_tiled_copy_O: tiled copy for the smem->gmem path; same condition
    """
    # One tmem slice is 8 accumulator rows' worth of 32-bit words, expressed in o_dtype elements.
    corr_tile_size = 8 * 32 // self.o_dtype.width
    # Use CTA 0 mapping for smem partitioning since sO is per-CTA sized
    tOsO = thr_mma.get_slice(0).partition_C(sO)
    tOcO = thr_mma.partition_C(cute.make_identity_tensor(self.mma_tiler_pv[:2]))
    tOtO_i = cute.logical_divide(tOtO, cute.make_layout((self.m_block_size, corr_tile_size)))
    tOcO_i = cute.logical_divide(tOcO, cute.make_layout((self.m_block_size, corr_tile_size)))
    tOsO_i = cute.logical_divide(tOsO, cute.make_layout((self.m_block_size, corr_tile_size)))
    epi_subtile = (self.epi_tile[0], corr_tile_size)
    tmem_copy_atom = sm100_utils_basic.get_tmem_load_op(
        self.mma_tiler_pv,
        self.o_layout,
        self.o_dtype,
        self.pv_acc_dtype,
        epi_subtile,
        use_2cta_instrs=self.use_2cta_instrs,
    )
    tiled_tmem_load = tcgen05.make_tmem_copy(tmem_copy_atom, tOtO_i[(None, None), 0])
    thr_tmem_load = tiled_tmem_load.get_slice(tidx)
    smem_copy_atom = sm100_utils_basic.get_smem_store_op(
        self.o_layout, self.o_dtype, self.pv_acc_dtype, tiled_tmem_load
    )
    tiled_smem_store = cute.make_tiled_copy_D(smem_copy_atom, tiled_tmem_load)
    tOtO_t2r = thr_tmem_load.partition_S(tOtO_i[(None, None), None])
    tOsO_s2r = copy_utils.partition_D_position_independent(thr_tmem_load, tOsO_i[(None, None), None])
    tOcO_t2r = thr_tmem_load.partition_D(tOcO_i[(None, None), None])
    # Slice-by-slice: tmem -> registers, scale by 1/row_sum (packed f32x2), then
    # convert-and-store registers -> smem.
    for i in cutlass.range(self.head_dim_v_padded // corr_tile_size, unroll_full=True):
        tOtO_t2r_i = tOtO_t2r[None, 0, 0, i]
        tOsO_r2s_i = tOsO_s2r[None, 0, 0, i]
        tOrO_frg = cute.make_fragment(tOcO_t2r[None, 0, 0, i].shape, self.pv_acc_dtype)
        cute.copy(tiled_tmem_load, tOtO_t2r_i, tOrO_frg)
        for j in cutlass.range(0, cute.size(tOrO_frg), 2, unroll_full=True):
            tOrO_frg[j], tOrO_frg[j + 1] = cute.arch.mul_packed_f32x2(
                (tOrO_frg[j], tOrO_frg[j + 1]), (scale, scale)
            )
        copy_utils.cvt_copy(tiled_smem_store, tOrO_frg, tOsO_r2s_i)
    # Ensure the smem writes are visible before the epilogue threads read them.
    cute.arch.fence_view_async_shared()
    if const_expr(self.use_correction_warps_for_epi):
        assert(not self.use_tma_O)
        assert(gmem_tiled_copy_O is not None)
        # Sync with the epilogue warps before reading sO back for the gmem store.
        cute.arch.barrier(barrier_id=int(NamedBarrierFwd.Epilogue),
                          number_of_threads=len(self.epilogue_warp_ids) * cute.arch.WARP_SIZE)
        mma_tile_coord_v = thr_mma.thr_idx
        m_tile_idx = (m_block * self.q_stage + stage) * self.cta_group_size + mma_tile_coord_v
        self._store_O_to_gmem(
            sO, gO, mO_cur, gmem_tiled_copy_O, tidx, seqlen_q, m_tile_idx
        )
@cute.jit
def _store_O_to_gmem(
    self,
    sO_stage: cute.Tensor,
    gO: cute.Tensor,
    mO_cur: cute.Tensor,
    gmem_tiled_copy_O: cute.TiledCopy,
    tidx: Int32,
    seqlen_q: Int32,
    m_tile_idx: Int32,
):
    """Copy a single stage of O from smem to gmem via registers.

    Predicates the store on both the head dimension (when it may be padded past
    mO's extent) and the remaining sequence length, so out-of-bounds rows and
    columns are never written. When pack_gqa is enabled, delegates the store to
    the PackGQA helper, which handles the interleaved q-head layout.
    """
    gmem_thr_copy_O = gmem_tiled_copy_O.get_slice(tidx)
    tOsO = gmem_thr_copy_O.partition_S(sO_stage)
    cO = cute.make_identity_tensor((self.m_block_size, self.head_dim_v_padded))
    tOgO = gmem_thr_copy_O.partition_D(gO)
    tOcO = gmem_thr_copy_O.partition_S(cO)
    # Thread 0's coordinate partition, used below for a warp-uniform row bound.
    t0OcO = gmem_tiled_copy_O.get_slice(0).partition_S(cO)
    # Column predicate: guard against head_dim_v padding beyond mO's real extent.
    tOpO = copy_utils.predicate_k(tOcO, limit=mO_cur.shape[1])
    pack_gqa = PackGQA(
        self.m_block_size,
        self.head_dim_v_padded,
        self.check_hdim_v_oob,
        self.qhead_per_kvhead,
    )
    # load acc O from smem to rmem for wider vectorization
    tOrO = cute.make_fragment_like(tOsO, self.o_dtype)
    cute.autovec_copy(tOsO, tOrO)
    # copy acc O from rmem to gmem
    if const_expr(not self.pack_gqa):
        for rest_m in cutlass.range_constexpr(cute.size(tOrO.shape[1])):
            # Row bound check rewritten relative to thread 0's coordinate so the
            # comparison left-hand side is uniform across the warp.
            if (
                t0OcO[0, rest_m, 0][0] < seqlen_q - m_tile_idx * self.m_block_size - tOcO[0][0]
            ):
                cute.copy(
                    gmem_tiled_copy_O,
                    tOrO[None, rest_m, None],
                    tOgO[None, rest_m, None],
                    pred=tOpO[None, rest_m, None]
                    if const_expr(self.check_hdim_v_oob)
                    else None,
                )
    else:
        pack_gqa.store_O(
            mO_cur, tOrO, gmem_tiled_copy_O, tidx, m_tile_idx, seqlen_q
        )
@cute.jit
def epilogue_s2g(
    self,
    mO: cute.Tensor,
    sO: cute.Tensor,
    gmem_tiled_copy_O: cute.TiledCopy,
    tma_atom_O: Optional[cute.CopyAtom],
    pipeline_o_epi: pipeline.PipelineAsync,
    block_info: BlockInfo,
    num_splits: int,
    SeqlenInfoCls: Callable,
    TileSchedulerCls: Callable,
    mma_tile_coord_v: Int32 = 0,
):
    """Persistent loop for the epilogue warps: drain finished O tiles from smem to gmem.

    For each work tile, waits on pipeline_o_epi for every stage the correction warps
    have committed, then stores sO to the corresponding gmem tile either via TMA
    (use_tma_O) or via a register-staged tiled copy, and releases the smem stage.
    """
    epi_consumer_phase = Int32(0)
    tile_scheduler = TileSchedulerCls()
    work_tile = tile_scheduler.initial_work_tile_info()
    while work_tile.is_valid_tile:
        m_block, head_idx, batch_idx, split_idx = work_tile.tile_idx
        seqlen = SeqlenInfoCls(batch_idx)
        n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block, split_idx, num_splits)
        # Empty split-KV tiles produce no O, so there is nothing to wait for or store.
        if const_expr(not self.is_split_kv) or n_block_min < n_block_max:
            if const_expr(self.is_split_kv):
                mO_cur = seqlen.offset_batch_Q(mO, batch_idx, dim=3)[None, None, head_idx, split_idx]
            else:
                mO_cur = seqlen.offset_batch_Q(mO, batch_idx, dim=3)[None, None, head_idx]
            tiler_gO = ((self.mma_tiler_pv[0] * self.q_stage), self.head_dim_v_padded)
            gO = cute.local_tile(mO_cur, tiler_gO, (m_block, 0))  # (128 * 2, 128)
            gO = layout_utils.select(
                cute.flat_divide(gO, (self.mma_tiler_pv[0],)), mode=[0, 2, 1]
            )  # (128, 128, 2)
            gO = cute.flat_divide(gO, (self.mma_tiler_pv[0] // self.cta_group_size,))[None, mma_tile_coord_v, None, None]
            if const_expr(self.use_tma_O):
                store_O, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_O, 0, cute.make_layout(1), sO, gO
                )
                for stage in cutlass.range(self.q_stage, unroll_full=True):
                    # wait from corr, issue tma store on smem
                    # 1. wait for O0 / O1 final
                    pipeline_o_epi.consumer_wait_w_index_phase(stage, epi_consumer_phase)
                    # 2. copy O0 / O1 to gmem
                    store_O(src_idx=stage, dst_idx=stage)
                    cute.arch.cp_async_bulk_commit_group()
                for stage in cutlass.range_constexpr(self.q_stage):
                    # Ensure O0 / O1 buffer is ready to be released
                    cute.arch.cp_async_bulk_wait_group(self.q_stage - 1 - stage, read=True)
                    pipeline_o_epi.consumer_release_w_index(stage)
            else:
                tidx = cute.arch.thread_idx()[0] % (
                    cute.arch.WARP_SIZE * len(self.epilogue_warp_ids)
                )
                for stage in cutlass.range_constexpr(self.q_stage):
                    # wait from corr, issue tma store on smem
                    # 1. wait for O0 / O1 final
                    pipeline_o_epi.consumer_wait_w_index_phase(stage, epi_consumer_phase)
                    # 2. copy O0 / O1 to gmem
                    m_tile_idx = (m_block * self.q_stage + stage) * self.cta_group_size + mma_tile_coord_v
                    self._store_O_to_gmem(
                        sO[None, None, stage], gO[None, None, stage], mO_cur, gmem_tiled_copy_O,
                        tidx, seqlen.seqlen_q, m_tile_idx,
                    )
                    pipeline_o_epi.consumer_release_w_index(stage)
            epi_consumer_phase ^= 1
        # Advance to next tile
        tile_scheduler.advance_to_next_work()
        work_tile = tile_scheduler.get_current_work()
def load_Q(
    self,
    load_Q_fn: Callable,
    pipeline_q: pipeline.PipelineAsync,
    block: Int32,
    stage: int,
    phase: Int32,
):
    """Issue the TMA load of one Q block into smem stage `stage`.

    Blocks on the producer side of `pipeline_q` until (`stage`, `phase`) is free,
    then invokes `load_Q_fn`, passing it the full-barrier for that stage so TMA
    completion can signal the consumers.
    """
    # Wait until the consumer side has drained this smem stage.
    pipeline_q.producer_acquire_w_index_phase(stage, phase)
    tma_barrier = pipeline_q.sync_object_full.get_barrier(stage)
    load_Q_fn(src_idx=block, dst_idx=stage, tma_bar_ptr=tma_barrier)
@cute.jit
def load_KV(
    self,
    tma_atom: Optional[cute.CopyAtom],
    tXgX: Optional[cute.Tensor],
    tXsX: Optional[cute.Tensor],
    paged_kv_manager: Optional[PagedKVManager],
    sX: cute.Tensor,
    block: Int32,
    pipeline_kv: pipeline.PipelineAsync,
    producer_state: pipeline.PipelineState,
    K_or_V: Literal["K", "V"],
    page_idx: Optional[Int32] = None,
    extra_tx_count: Optional[Int32] = None,
):
    """Load one K or V block into the smem stage selected by `producer_state`.

    Uses TMA when use_tma_KV (accounting the transaction byte count, including any
    caller-supplied extra bytes), otherwise goes through the paged-KV cp.async path.
    """
    assert K_or_V in ("K", "V")
    stage, phase = producer_state.index, producer_state.phase
    # V tiles may carry more bytes than K tiles; add the difference to the
    # mbarrier transaction count so it completes at the right byte total.
    extra_tx_count_kv = self.tma_copy_bytes[K_or_V] - self.tma_copy_bytes["K"]
    extra_tx_count = (
        extra_tx_count_kv + (extra_tx_count if extra_tx_count is not None else 0) if const_expr(self.use_tma_KV)
        else None
    )
    extra_kwargs = {"extra_tx_count": extra_tx_count} if const_expr(self.use_tma_KV) else {}
    pipeline_kv.producer_acquire(producer_state, **extra_kwargs)
    if const_expr(K_or_V == "K" and self.uneven_kv_smem):
        # Before this round, the smem location was occupied by V, which is smaller than
        # K. So we need to wait for the stage after that (stage 1) to be empty as well.
        if stage == 0:
            pipeline_kv.sync_object_empty.wait(1, phase)
    if const_expr(self.use_tma_KV):
        assert tXgX is not None and tXsX is not None and tma_atom is not None
        tXsX_cur = tXsX[None, stage]
        if const_expr(self.uneven_kv_smem):
            # Since this is the producer_state, the phase starts at 1, so we have to invert it
            tXsX_cur = self.offset_kv_smem(tXsX_cur, stage, phase ^ 1)
        # Currently we assume that page_size == n_block_size so we index into tXgX with block = 0
        tXgX_cur = tXgX[None, block] if const_expr(page_idx is None) else tXgX[None, 0, page_idx]
        cute.copy(tma_atom, tXgX_cur, tXsX_cur, tma_bar_ptr=pipeline_kv.producer_get_barrier(producer_state))
    else:
        assert paged_kv_manager is not None
        assert extra_tx_count is None
        paged_kv_manager.load_KV(block, sX[None, None, None, stage], K_or_V)
        cute.arch.cp_async_commit_group()
        # cp.async has no TMA barrier; arrive on the full-mbarrier once the group commits.
        pipeline_kv.sync_object_full.arrive_cp_async_mbarrier(stage)
@cute.jit
def offset_kv_smem(self, sX: cute.Tensor, stage: Int32, phase: Int32):
    """Return `sX` shifted to the correct smem location for uneven K/V staging.

    With uneven_kv_smem the two stages have different sizes, so stage 1's base
    address alternates between two positions depending on the pipeline phase.
    No-op when uneven_kv_smem is disabled.
    """
    if const_expr(self.uneven_kv_smem):
        # smem layout is [smem_large, smem_small, smem_large], and the current stride is
        # (smem_large + smem_small) // 2. So for stage == 1, move right by offset if
        # phase == 0, or left by offset if phase == 1.
        offset = 0 if stage != 1 else self.uneven_kv_smem_offset * (1 - 2 * phase)
        return cute.make_tensor(sX.iterator + offset, sX.layout)
    else:
        return sX
# @cute.jit
# def warp_scheduler_barrier_init(self):
# warp_group_idx = utils.canonical_warp_group_idx(sync=False)
# if warp_group_idx == 0:
# cute.arch.barrier_arrive(
# barrier_id=int(NamedBarrierFwd.WarpSchedulerWG1), number_of_threads=2 * 128,
# )
# def warp_scheduler_barrier_sync(self):
# cute.arch.barrier(
# barrier_id=int(NamedBarrierFwd.WarpSchedulerWG1) + utils.canonical_warp_group_idx(sync=False),
# number_of_threads=2 * 128
# )
# def warp_scheduler_barrier_arrive(self):
# cur_wg = utils.canonical_warp_group_idx(sync=False)
# next_wg = 1 - cur_wg
# cute.arch.barrier_arrive(
# barrier_id=int(NamedBarrierFwd.WarpSchedulerWG1) + next_wg, number_of_threads=2 * 128,
# )
@cute.jit
def apply_score_mod(
    self,
    tSrS_t2r,
    thr_tmem_load,
    thr_mma_qk,
    batch_idx,
    head_idx,
    m_block,
    n_block,
    softmax,
    seqlen: SeqlenInfoQK,
    aux_tensors=None,
    fastdiv_mods=(None, None),
    head_divmod=None,
):
    """Apply score modification for SM100 (constant q_idx).

    On SM100 every score element held by one thread shares the same q row, so a
    single logical q index (and, with pack_gqa, a single logical head index) is
    computed once and passed to apply_score_mod_inner as a constant.
    """
    # Prepare index tensor with extra partition
    cS = cute.make_identity_tensor((self.m_block_size, self.n_block_size))
    cS = cute.domain_offset((m_block * self.m_block_size, n_block * self.n_block_size), cS)
    tScS = thr_mma_qk.partition_C(cS)
    tScS = tScS[(None, None), 0, 0]
    tScS_t2r = thr_tmem_load.partition_D(tScS)
    # Shared q_idx for all scores
    q_idx_logical = tScS_t2r[0][0]
    # For Pack-GQA, compute the logical head index for this tile
    if cutlass.const_expr(self.pack_gqa):
        assert head_divmod is not None
        # Building up the logical q_head idx: final_q_head = kv_head * qhead_per_kvhead + (q_physical % qhead_per_kvhead)
        q_physical = q_idx_logical
        q_idx_logical, head_offset = divmod(q_physical, head_divmod)
        head_idx = head_idx * self.qhead_per_kvhead + head_offset
    if cutlass.const_expr(aux_tensors is not None):
        # Fold out the batch component so aux tensors are indexed by in-sequence position.
        seqlen_q_divmod, _ = fastdiv_mods
        _, q_idx_logical = divmod(q_idx_logical, seqlen_q_divmod)
    apply_score_mod_inner(
        tSrS_t2r,
        tScS_t2r,
        self.score_mod,
        batch_idx,
        head_idx,
        softmax.softmax_scale,
        self.vec_size,
        self.qk_acc_dtype,
        aux_tensors,
        fastdiv_mods,
        seqlen_info=seqlen,
        constant_q_idx=q_idx_logical,
        qhead_per_kvhead=self.qhead_per_kvhead if cutlass.const_expr(self.pack_gqa) else 1,
    )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_fwd_sm100.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 2668,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/mma_sm100_desc.py | # Copyright (c) 2025, Tri Dao.
# Ported Cutlass code from C++ to Python:
# https://github.com/NVIDIA/cutlass/blob/main/include/cute/arch/mma_sm100_desc.hpp
# https://github.com/NVIDIA/cutlass/blob/main/include/cute/atom/mma_traits_sm100.hpp
from enum import IntEnum
import cutlass
import cutlass.cute as cute
# ---------------------------------------------------------------------------
# Enumerations that match the HW encodings (values MUST stay identical)
# ---------------------------------------------------------------------------
class Major(IntEnum):  # matrix "layout" in the ISA docs
    """Operand layout field; values must match the hardware encoding exactly."""
    K = 0
    MN = 1
class ScaleIn(IntEnum):  # negate flags
    """Input-negation flag; values must match the hardware encoding exactly."""
    One = 0
    Neg = 1
class Saturate(IntEnum):
    """Accumulator saturation flag; values must match the hardware encoding exactly."""
    False_ = 0
    True_ = 1
class CFormat(IntEnum):  # 2-bit field (bits 4-5)
    """Accumulator (C matrix) element-type encoding, 2-bit hardware field."""
    F16 = 0
    F32 = 1
    S32 = 2
class F16F32Format(IntEnum):  # 3-bit field (A/B element type)
    """A/B element-type encoding for the fp16/bf16/tf32 instruction family."""
    F16 = 0
    BF16 = 1
    TF32 = 2
class S8Format(IntEnum):
    """A/B element-type encoding for the 8-bit integer instruction family."""
    UINT8 = 0
    INT8 = 1
class MXF8F6F4Format(IntEnum):
    """A/B element-type encoding for the fp8/fp6/fp4 family (note: 2 is unused)."""
    E4M3 = 0
    E5M2 = 1
    E2M3 = 3
    E3M2 = 4
    E2M1 = 5
class MaxShift(IntEnum):
    """2-bit max-shift field (bits 30-31 of the instruction descriptor)."""
    NoShift = 0
    MaxShift8 = 1
    MaxShift16 = 2
    MaxShift32 = 3
# ---------------------------------------------------------------------------
# CUTLASS-type → encoding helpers
# ---------------------------------------------------------------------------
def to_UMMA_format(cutlass_type) -> int:
    """
    Translate a CUTLASS scalar class into the 3-bit A/B operand encoding.

    Raises TypeError for scalar classes the UMMA instruction cannot consume.
    The checks are kept in this exact order and short-circuit on purpose:
    attributes such as ``cutlass.Uint8`` may be missing from some CUTLASS
    builds, and must not be touched once an earlier check has matched.
    """
    # 8-bit integers.
    if cutlass_type is cutlass.Int8:
        return S8Format.INT8
    elif cutlass_type is cutlass.Uint8:  # only present in some CUTLASS builds
        return S8Format.UINT8
    # Half-precision floats.
    elif cutlass_type is cutlass.Float16:
        return F16F32Format.F16
    elif cutlass_type is cutlass.BFloat16:
        return F16F32Format.BF16
    # TensorFloat-32 (8-bit exponent, 10-bit mantissa packed in 19 bits).
    elif cutlass_type is cutlass.TFloat32:
        return F16F32Format.TF32
    # Narrow floats — extend as CUTLASS exposes fp6/fp4 types.
    elif cutlass_type is cutlass.FloatE4M3FN:
        return MXF8F6F4Format.E4M3
    elif cutlass_type is cutlass.FloatE5M2:
        return MXF8F6F4Format.E5M2
    raise TypeError(f"Unsupported CUTLASS scalar type for A/B: {cutlass_type!r}")
def to_C_format(cutlass_type) -> int:
    """
    Translate a CUTLASS scalar class into the 2-bit accumulator (matrix C) encoding.

    Raises TypeError for any scalar class the accumulator cannot hold.
    """
    # The three accumulator types are core CUTLASS scalars, present in every build.
    for scalar_cls, code in (
        (cutlass.Float16, CFormat.F16),
        (cutlass.Float32, CFormat.F32),
        (cutlass.Int32, CFormat.S32),
    ):
        if cutlass_type is scalar_cls:
            return code
    raise TypeError(f"Unsupported CUTLASS scalar type for accumulator: {cutlass_type!r}")
# ---------------------------------------------------------------------------
# The constructor – accepts only CUTLASS scalar classes
# ---------------------------------------------------------------------------
def make_instr_desc(
    a_type,  # CUTLASS scalar class (e.g. cutlass.Int8) — never a raw integer
    b_type,
    c_type,
    M: int,  # tile M: 64, 128 or 256
    N: int,  # tile N: multiple of 8 in 8…256
    a_major: Major,
    b_major: Major,
    a_neg: ScaleIn = ScaleIn.One,
    b_neg: ScaleIn = ScaleIn.One,
    c_sat: Saturate = Saturate.False_,
    is_sparse: bool = False,
    max_shift: MaxShift = MaxShift.NoShift,
) -> int:
    """
    Build the 32-bit instruction descriptor for Blackwell MMA.

    All matrix/accumulator types must be CUTLASS scalar classes; passing
    integers is forbidden (the format helpers raise TypeError).

    Raises:
        TypeError: unsupported operand/accumulator scalar class.
        ValueError: M or N outside the legal tile-shape range.
    """
    # Resolve element formats first so a bad type raises TypeError even when
    # M/N are also invalid (matches the historical exception ordering).
    a_fmt = int(to_UMMA_format(a_type))
    b_fmt = int(to_UMMA_format(b_type))
    c_fmt = int(to_C_format(c_type))
    # Tile-shape validation.
    if M not in (64, 128, 256):
        raise ValueError("M must be 64, 128 or 256")
    if N < 8 or N > 256 or (N & 7):
        raise ValueError("N must be a multiple of 8 in the range 8…256")
    # Each descriptor field as (value, mask, bit offset); see mma_sm100_desc.hpp.
    fields = (
        (0, 0x3, 0),                   # sparse_id2 (always 0 here)
        (int(is_sparse), 0x1, 2),      # sparse_flag
        (int(c_sat), 0x1, 3),          # saturate
        (c_fmt, 0x3, 4),               # c_format (2 bits)
        (a_fmt, 0x7, 7),               # a_format (3 bits)
        (b_fmt, 0x7, 10),              # b_format (3 bits)
        (int(a_neg), 0x1, 13),         # a_negate
        (int(b_neg), 0x1, 14),         # b_negate
        (int(a_major), 0x1, 15),       # a_major
        (int(b_major), 0x1, 16),       # b_major
        (N >> 3, 0x3F, 17),            # n_dim (6 bits, units of 8)
        (M >> 4, 0x1F, 24),            # m_dim (5 bits, units of 16)
        (int(max_shift), 0x3, 30),     # max_shift (2 bits)
    )
    desc = 0
    for value, mask, offset in fields:
        desc |= (value & mask) << offset
    return desc & 0xFFFF_FFFF  # ensure 32-bit result
def mma_op_to_idesc(op: cute.nvgpu.tcgen05.mma.MmaOp):
    """Derive the 32-bit instruction descriptor from a tcgen05 MmaOp's dtypes, shape and major modes."""
    mode_k = cute.nvgpu.tcgen05.mma.OperandMajorMode.K
    a_major = Major.K if op.a_major_mode == mode_k else Major.MN
    b_major = Major.K if op.b_major_mode == mode_k else Major.MN
    m, n = op.shape_mnk[0], op.shape_mnk[1]
    return make_instr_desc(op.a_dtype, op.b_dtype, op.acc_dtype, m, n, a_major, b_major)
class LayoutType(IntEnum):
    """Shared-memory swizzle family — occupies the top 3 bits [61:64) of the smem descriptor.

    Values 3, 5 and 7 are reserved / illegal for UMMA.
    """
    SWIZZLE_NONE = 0  # a.k.a. "INTERLEAVE" in older docs
    SWIZZLE_128B_BASE32B = 1
    SWIZZLE_128B = 2
    SWIZZLE_64B = 4
    SWIZZLE_32B = 6
# ---------------------------------------------------------------------------
# Helpers – figure out the SWIZZLE_* family from the tensor layout
# ---------------------------------------------------------------------------
def _layout_type(swizzle: cute.Swizzle) -> LayoutType:
    """
    Resolve the SWIZZLE_* family for a swizzle triple (num_bits, num_base, num_shift).

    Raises ValueError (or KeyError for an out-of-range bit count) when the
    triple is not a UMMA-legal shared-memory layout.
    """
    bits = swizzle.num_bits
    base = swizzle.num_base
    shift = swizzle.num_shift
    if base == 4:  # the Swizzle<B,4,3> family
        if shift != 3:
            raise ValueError("Unexpected swizzle shift – want S==3 for M==4")
        families = {
            0: LayoutType.SWIZZLE_NONE,
            1: LayoutType.SWIZZLE_32B,
            2: LayoutType.SWIZZLE_64B,
            3: LayoutType.SWIZZLE_128B,
        }
        return families[bits]  # KeyError ⇒ invalid B
    if base == 5:  # Swizzle<2,5,2> is the only legal triple for M==5
        if (bits, shift) != (2, 2):
            raise ValueError("Only Swizzle<2,5,2> supported for 128B_BASE32B")
        return LayoutType.SWIZZLE_128B_BASE32B
    # Any other (M,B,S) triple is not a UMMA-legal shared-memory layout.
    raise ValueError("Unsupported swizzle triple for UMMA smem descriptor")
def make_smem_desc_base(layout: cute.Layout, swizzle: cute.Swizzle, major: Major) -> int:
    """
    Convert a 2-D *shared-memory* Cute layout into the Blackwell 64-bit
    smem-descriptor, without the smem start address.
    layout must correspond to layout of an uint128 tensor.

    Raises ValueError when the layout is not a canonical UMMA MN-/K-major
    layout for the swizzle family, or when the swizzle triple itself is
    not UMMA-legal (see _layout_type).
    """
    # ------------------------------------------------------------------ meta
    layout_type = _layout_type(swizzle)  # resolve SWIZZLE_* family
    VERSION = 1  # bits 46–47
    LBO_MODE = 0  # bit 52
    BASE_OFFSET = 0  # bits 49–51 (CUTLASS always 0)
    # ---------------------------------------------------------- strides (units: uint128_t = 16 B)
    # MN-extent of one swizzle atom, in uint128 elements.
    swizzle_atom_mn_size = {
        LayoutType.SWIZZLE_NONE: 1,
        LayoutType.SWIZZLE_32B: 2,
        LayoutType.SWIZZLE_64B: 4,
        LayoutType.SWIZZLE_128B: 8,
        LayoutType.SWIZZLE_128B_BASE32B: 8,
    }[layout_type]
    if major is Major.MN:
        # Tile the layout by one swizzle atom; the resulting profile/strides must
        # match the canonical MN-major form expected by the hardware descriptor.
        swizzle_atom_k_size = 4 if layout_type is LayoutType.SWIZZLE_128B_BASE32B else 8
        canonical_layout = cute.logical_divide(layout, (swizzle_atom_mn_size, swizzle_atom_k_size))
        if not cute.is_congruent(canonical_layout, ((1, 1), (1, 1))):
            raise ValueError("Not a canonical UMMA_MN Layout: Expected profile failure.")
        stride_00 = canonical_layout.stride[0][0]
        if layout_type is not LayoutType.SWIZZLE_NONE and stride_00 != 1:
            raise ValueError("Not a canonical UMMA_MN Layout: Expected stride failure.")
        stride_10 = canonical_layout.stride[1][0]
        if stride_10 != swizzle_atom_mn_size:
            raise ValueError("Not a canonical UMMA_MN Layout: Expected stride failure.")
        stride_01, stride_11 = canonical_layout.stride[0][1], canonical_layout.stride[1][1]
        # SBO/LBO swap roles between the non-swizzled and swizzled MN cases.
        if layout_type is LayoutType.SWIZZLE_NONE:
            stride_byte_offset, leading_byte_offset = stride_01, stride_11
        else:
            stride_byte_offset, leading_byte_offset = stride_11, stride_01
    else:
        # K-major path.
        if layout_type == LayoutType.SWIZZLE_128B_BASE32B:
            raise ValueError("SWIZZLE_128B_BASE32B is invalid for Major-K")
        if not cute.size(layout.shape[0]) % 8 == 0:
            raise ValueError("Not a canonical UMMA_K Layout: Expected MN-size multiple of 8.")
        canonical_layout = cute.logical_divide(layout, (8, 2))
        if not cute.is_congruent(canonical_layout, ((1, 1), (1, 1))):
            raise ValueError("Not a canonical UMMA_K Layout: Expected profile failure.")
        stride_00 = canonical_layout.stride[0][0]
        if stride_00 != swizzle_atom_mn_size:
            raise ValueError("Not a canonical UMMA_K Layout: Expected stride failure.")
        stride_10 = canonical_layout.stride[1][0]
        if layout_type is not LayoutType.SWIZZLE_NONE and stride_10 != 1:
            raise ValueError("Not a canonical UMMA_K Layout: Expected stride failure.")
        stride_01 = canonical_layout.stride[0][1]
        stride_byte_offset, leading_byte_offset = stride_01, stride_10
    # ------------------------------------------------------------------ pack
    # Bits [0:14) (start address) and [14:16), [30:32) are left zero here;
    # the start address is OR-ed in later (see make_smem_desc_start_addr).
    desc = 0
    # leading_byte_offset_ [16:30)
    desc |= (leading_byte_offset & 0x3FFF) << 16
    # stride_byte_offset_ [32:46)
    desc |= (stride_byte_offset & 0x3FFF) << 32
    # version_ [46:48)
    desc |= (VERSION & 0x3) << 46
    # base_offset_ [49:52)
    desc |= (BASE_OFFSET & 0x7) << 49
    # lbo_mode_ [52:53)
    desc |= (LBO_MODE & 0x1) << 52
    # layout_type_ [61:64)
    desc |= (int(layout_type) & 0x7) << 61
    return desc & 0xFFFF_FFFF_FFFF_FFFF  # force 64-bit width
def make_smem_desc_start_addr(start_addr: cute.Pointer) -> cutlass.Int32:
    """Encode a shared-memory pointer into the descriptor's 14-bit start-address field.

    Keeps the 18 low address bits, then drops the 4 LSBs (16-byte granularity);
    the result occupies bits [0:14) of the smem descriptor.
    """
    masked = start_addr.toint() & 0x3FFFF
    return masked >> 4
def smem_desc_base_from_tensor(sA: cute.Tensor, major: Major) -> int:
    """Build the address-less 64-bit smem descriptor for tensor *sA*'s first-mode layout."""
    swizzle = sA.iterator.type.swizzle_type
    # make_smem_desc_base expects a uint128-granularity layout, so recast first.
    layout_u128 = cute.recast_layout(128, sA.element_type.width, sA.layout[0])
    return make_smem_desc_base(layout_u128, swizzle, major)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/mma_sm100_desc.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/named_barrier.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
import enum
class NamedBarrierFwd(enum.IntEnum):
    """Named-barrier ids for the forward kernel.

    Numbering starts at 1 because barrier 0 is reserved for sync_threads().
    """
    Epilogue = 1
    WarpSchedulerWG1 = 2
    WarpSchedulerWG2 = 3
    WarpSchedulerWG3 = 4
    PFull = 5
    PEmpty = 6
class NamedBarrierBwd(enum.IntEnum):
    """Named-barrier ids for the backward kernel (barrier 0 stays reserved for sync_threads())."""
    Epilogue = 1
    WarpSchedulerWG1 = 2
    WarpSchedulerWG2 = 3
    WarpSchedulerWG3 = 4
    PdS = 5
    dQFullWG0 = 6
    dQFullWG1 = 7
    dQEmptyWG0 = 8
    dQEmptyWG1 = 9
class NamedBarrierBwdSm100(enum.IntEnum):
    """Named-barrier ids for the SM100 (Blackwell) backward kernel."""
    EpilogueWG1 = 1
    EpilogueWG2 = 2
    Compute = 3
    dQaccReduce = 4
    TmemPtr = 5
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/named_barrier.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/pack_gqa.py | # Copyright (c) 2025, Tri Dao.
import cutlass
import cutlass.cute as cute
from quack import layout_utils
import flash_attn.cute.utils as utils
class PackGQA:
    """Gmem load/store helpers for "packed GQA".

    The qhead_per_kvhead query heads that share one KV head are folded into the
    row (M) dimension of a tile, so a packed row index decomposes into a
    (head-within-group, sequence-position) pair via divmod. Each helper first
    precomputes per-row gmem base pointers (compute_ptr) and then broadcasts
    them across the row's threads with a warp shuffle.
    """
    def __init__(
        self,
        m_block_size: cutlass.Constexpr[int],
        head_dim_padded: cutlass.Constexpr[int],
        check_hdim_oob: cutlass.Constexpr[bool],
        qhead_per_kvhead: cutlass.Constexpr[bool],
    ):
        # NOTE(review): qhead_per_kvhead is annotated Constexpr[bool] but is used
        # as an integer ratio (divmod in compute_ptr) — presumably Constexpr[int];
        # confirm against callers.
        self.m_block_size = m_block_size
        self.head_dim_padded = head_dim_padded
        self.check_hdim_oob = check_hdim_oob
        self.qhead_per_kvhead = qhead_per_kvhead
    @cute.jit
    def compute_ptr(
        self,
        tensor: cute.Tensor,
        cRows: cute.Tensor,
        tidx: cutlass.Int32,
        block: cutlass.Int32,
        threads_per_row: cutlass.Constexpr[int],
        num_threads: cutlass.Constexpr[int],
    ):
        """Precompute gmem base pointers (as Int64) for the packed rows this thread
        is responsible for; each pointer addresses tensor at ((h_idx, m_idx),)."""
        num_ptr_per_thread = cute.ceil_div(cute.size(cRows), threads_per_row)
        tPrPtr = cute.make_fragment(num_ptr_per_thread, cutlass.Int64)
        for i in cutlass.range_constexpr(num_ptr_per_thread):
            row = i * num_threads + cRows[tidx % threads_per_row][0]
            idx = block * self.m_block_size + row
            # Unpack the packed row index into (sequence position, head-in-group).
            m_idx = idx // self.qhead_per_kvhead
            h_idx = idx - m_idx * self.qhead_per_kvhead
            tPrPtr[i] = utils.elem_pointer(tensor, ((h_idx, m_idx),)).toint()
        return tPrPtr
    @cute.jit
    def load_Q(
        self,
        mQ: cute.Tensor,  # ((qhead_per_kvhead, seqlen_q), headdim)
        sQ: cute.Tensor,  # (m_block_size, head_dim_padded)
        gmem_tiled_copy: cute.TiledCopy,
        tidx: cutlass.Int32,
        block: cutlass.Int32,
        seqlen: cutlass.Int32,
    ):
        """Copy one packed-GQA Q tile from gmem into smem, row by row, predicating
        rows past the packed sequence end and (optionally) the head-dim tail."""
        gmem_thr_copy = gmem_tiled_copy.get_slice(tidx)
        cQ = cute.make_identity_tensor((self.m_block_size, self.head_dim_padded))
        tQsQ = gmem_thr_copy.partition_D(sQ)
        tQcQ = gmem_thr_copy.partition_S(cQ)
        # Thread 0's coordinates — used for a uniform row-bound comparison below.
        t0QcQ = gmem_thr_copy.get_slice(0).partition_S(cQ)
        tQpQ = utils.predicate_k(tQcQ, limit=mQ.shape[1])
        tQcQ_row = tQcQ[0, None, 0]
        threads_per_row = gmem_tiled_copy.layout_tv_tiled.shape[0][0]
        assert cute.arch.WARP_SIZE % threads_per_row == 0, "threads_per_row must divide WARP_SIZE"
        num_threads = gmem_tiled_copy.size
        tPrQPtr = self.compute_ptr(mQ[None, 0], tQcQ_row, tidx, block, threads_per_row, num_threads)
        for m in cutlass.range_constexpr(cute.size(tQsQ.shape[1])):
            # Broadcast the row's base pointer from the thread that computed it.
            q_ptr_i64 = utils.shuffle_sync(
                tPrQPtr[m // threads_per_row], m % threads_per_row, width=threads_per_row
            )
            q_gmem_ptr = cute.make_ptr(
                mQ.element_type, q_ptr_i64, cute.AddressSpace.gmem, assumed_align=16
            )
            # Row-bound check against the packed (seqlen * qhead_per_kvhead) extent.
            if (
                t0QcQ[0, m, 0][0]
                < seqlen * self.qhead_per_kvhead - block * self.m_block_size - tQcQ_row[0][0]
            ):
                mQ_cur = cute.make_tensor(q_gmem_ptr, (self.head_dim_padded,))
                elems_per_load = cute.size(tQsQ.shape[0][0])
                mQ_cur_copy = cute.tiled_divide(mQ_cur, (elems_per_load,))
                for k in cutlass.range_constexpr(cute.size(tQsQ.shape[2])):
                    ki = tQcQ[0, 0, k][1] // elems_per_load
                    cute.copy(
                        gmem_thr_copy,
                        mQ_cur_copy[None, ki],
                        tQsQ[None, m, k],
                        pred=tQpQ[None, m, k] if cutlass.const_expr(self.check_hdim_oob) else None,
                    )
        # We don't need to clear the sQ smem tiles since we'll only write out the valid outputs
    @cute.jit
    def store_LSE(
        self,
        mLSE: cute.Tensor,  # (qhead_per_kvhead, seqlen_q)
        tLSErLSE: cute.Tensor,  # (m_block_size, head_dim_padded)
        tiled_mma: cute.TiledMma,
        tidx: cutlass.Int32,
        block: cutlass.Int32,
        seqlen: cutlass.Int32,
    ):
        """Scatter per-row LSE values from registers to gmem using packed-GQA
        addressing; only the thread owning accumulator column 0 writes each row."""
        thr_mma = tiled_mma.get_slice(tidx)
        caccO = cute.make_identity_tensor((self.m_block_size, self.head_dim_padded))
        taccOcO = thr_mma.partition_C(caccO)
        taccOcO_row = layout_utils.reshape_acc_to_mn(taccOcO)[None, 0]
        assert cute.size(tLSErLSE) == cute.size(taccOcO_row)
        threads_per_row = tiled_mma.tv_layout_C.shape[0][0]
        assert cute.arch.WARP_SIZE % threads_per_row == 0, "threads_per_row must divide WARP_SIZE"
        assert cute.size(tLSErLSE) <= threads_per_row
        num_threads = tiled_mma.size
        tPrLSEPtr = self.compute_ptr(mLSE, taccOcO_row, tidx, block, threads_per_row, num_threads)
        for m in cutlass.range_constexpr(cute.size(tLSErLSE)):
            lse_ptr_i64 = utils.shuffle_sync(
                tPrLSEPtr[m // threads_per_row],
                m % threads_per_row,
                width=threads_per_row,
            )
            lse_gmem_ptr = cute.make_ptr(
                mLSE.element_type, lse_ptr_i64, cute.AddressSpace.gmem, assumed_align=4
            )
            row = block * self.m_block_size + taccOcO_row[m][0]
            # Only the thread corresponding to column 0 writes out the lse to gmem
            if taccOcO[0][1] == 0 and row < seqlen * self.qhead_per_kvhead:
                mLSE_copy = cute.make_tensor(lse_gmem_ptr, (1,))
                mLSE_copy[0] = tLSErLSE[m]
    @cute.jit
    def store_O(
        self,
        mO: cute.Tensor,  # ((qhead_per_kvhead, seqlen_q), headdim)
        tOrO: cute.Tensor,  # (m_block_size, head_dim_padded) split across threads according to gmem_tiled_copy
        gmem_tiled_copy: cute.TiledCopy,
        tidx: cutlass.Int32,
        block: cutlass.Int32,
        seqlen: cutlass.Int32,
    ):
        """Mirror of load_Q for the output: copy the O tile from registers to gmem,
        predicating out-of-range packed rows and (optionally) the head-dim tail."""
        gmem_thr_copy = gmem_tiled_copy.get_slice(tidx)
        cO = cute.make_identity_tensor((self.m_block_size, self.head_dim_padded))
        tOcO = gmem_thr_copy.partition_S(cO)
        t0OcO = gmem_thr_copy.get_slice(0).partition_S(cO)
        tOpO = utils.predicate_k(tOcO, limit=mO.shape[1])
        tOcO_row = tOcO[0, None, 0]
        threads_per_row = gmem_tiled_copy.layout_tv_tiled.shape[0][0]
        assert cute.arch.WARP_SIZE % threads_per_row == 0, "threads_per_row must divide WARP_SIZE"
        num_threads = gmem_tiled_copy.size
        tPrOPtr = self.compute_ptr(mO[None, 0], tOcO_row, tidx, block, threads_per_row, num_threads)
        for m in cutlass.range_constexpr(cute.size(tOrO.shape[1])):
            o_ptr_i64 = utils.shuffle_sync(
                tPrOPtr[m // threads_per_row], m % threads_per_row, width=threads_per_row
            )
            o_gmem_ptr = cute.make_ptr(
                mO.element_type, o_ptr_i64, cute.AddressSpace.gmem, assumed_align=16
            )
            if (
                t0OcO[0, m, 0][0]
                < seqlen * self.qhead_per_kvhead - block * self.m_block_size - tOcO_row[0][0]
            ):
                mO_cur = cute.make_tensor(o_gmem_ptr, (self.head_dim_padded,))
                elems_per_load = cute.size(tOrO.shape[0][0])
                mO_cur_copy = cute.tiled_divide(mO_cur, (elems_per_load,))
                for k in cutlass.range_constexpr(cute.size(tOrO.shape[2])):
                    ki = tOcO[0, 0, k][1] // elems_per_load
                    cute.copy(
                        gmem_thr_copy,
                        tOrO[None, m, k],
                        mO_cur_copy[None, ki],
                        pred=tOpO[None, m, k] if cutlass.const_expr(self.check_hdim_oob) else None,
                    )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/pack_gqa.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/block_info.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
from typing import Tuple, Optional
from dataclasses import dataclass
import cutlass
import cutlass.cute as cute
from cutlass import Int32, const_expr
from flash_attn.cute.seqlen_info import SeqlenInfoQK
@dataclass(frozen=True)
class BlockInfo:
    """Computes the [min, max) ranges of K/V (n) and Q (m) tile indices a thread
    block must visit, given causal/local masking, optional packed-GQA row
    packing, and optional split-KV partitioning."""
    tile_m: cutlass.Constexpr[int]
    tile_n: cutlass.Constexpr[int]
    is_causal: cutlass.Constexpr[bool]
    is_local: cutlass.Constexpr[bool] = False
    is_split_kv: cutlass.Constexpr[bool] = False
    window_size_left: Optional[Int32] = None
    window_size_right: Optional[Int32] = None
    qhead_per_kvhead_packgqa: cutlass.Constexpr[int] = 1  # >1 when PackGQA folds heads into rows
    @cute.jit
    def get_n_block_min_max(
        self,
        seqlen_info: SeqlenInfoQK,
        m_block: Int32,
        split_idx: cutlass.Int32 = 0,
        num_splits: cutlass.Int32 = 1,
    ) -> Tuple[Int32, Int32]:
        """Return the [min, max) range of K/V tiles for Q tile m_block, clipped by
        causal/local windows and then narrowed to this split when split-KV is on."""
        n_block_max = cute.ceil_div(seqlen_info.seqlen_k, self.tile_n)
        if const_expr(self.is_causal or (self.is_local and self.window_size_right is not None)):
            m_idx_max = (m_block + 1) * self.tile_m
            # With PackGQA, rows are packed head-major; convert back to sequence positions.
            if const_expr(self.qhead_per_kvhead_packgqa > 1):
                m_idx_max = cute.ceil_div(m_idx_max, self.qhead_per_kvhead_packgqa)
            # Bottom-right alignment of the causal/local diagonal.
            n_idx = m_idx_max + seqlen_info.seqlen_k - seqlen_info.seqlen_q
            n_idx_right = n_idx if const_expr(self.is_causal) else n_idx + self.window_size_right
            n_block_max = min(n_block_max, cute.ceil_div(n_idx_right, self.tile_n))
        n_block_min = 0
        if const_expr(self.is_local and self.window_size_left is not None):
            m_idx_min = m_block * self.tile_m
            if const_expr(self.qhead_per_kvhead_packgqa > 1):
                m_idx_min = m_idx_min // self.qhead_per_kvhead_packgqa
            n_idx = m_idx_min + seqlen_info.seqlen_k - seqlen_info.seqlen_q
            n_idx_left = n_idx - self.window_size_left
            n_block_min = cutlass.max(n_idx_left // self.tile_n, 0)
        if cutlass.const_expr(self.is_split_kv):
            # Divide the surviving n-range evenly among num_splits splits.
            num_n_blocks_per_split = (
                cutlass.Int32(0)
                if n_block_max <= n_block_min
                else (n_block_max - n_block_min + num_splits - 1) // num_splits
            )
            n_block_min = n_block_min + split_idx * num_n_blocks_per_split
            n_block_max = cutlass.min(n_block_min + num_n_blocks_per_split, n_block_max)
        return n_block_min, n_block_max
    @cute.jit
    def get_m_block_min_max(self, seqlen_info: SeqlenInfoQK, n_block: Int32) -> Tuple[Int32, Int32]:
        """Return the [min, max) range of Q tiles that can attend to K/V tile n_block
        (the transpose of get_n_block_min_max; used by the backward pass)."""
        m_block_max = cute.ceil_div(seqlen_info.seqlen_q, self.tile_m)
        m_block_min = 0
        if const_expr(self.is_causal or (self.is_local and self.window_size_right is not None)):
            n_idx_min = n_block * self.tile_n
            m_idx = n_idx_min + seqlen_info.seqlen_q - seqlen_info.seqlen_k
            m_idx_right = m_idx if const_expr(self.is_causal) else m_idx - self.window_size_right
            m_block_min = max(m_block_min, m_idx_right // self.tile_m)
        if const_expr(self.is_local and self.window_size_left is not None):
            n_idx_max = (n_block + 1) * self.tile_n
            m_idx = n_idx_max + seqlen_info.seqlen_q - seqlen_info.seqlen_k
            m_idx_left = m_idx + self.window_size_left
            m_block_max = min(m_block_max, cute.ceil_div(m_idx_left, self.tile_m))
        return m_block_min, m_block_max
    @cute.jit
    def get_n_block_min_causal_local_mask(
        self,
        seqlen_info: SeqlenInfoQK,
        m_block: Int32,
        n_block_min: Int32,
    ) -> Int32:
        """If we have separate iterations with causal or local masking at the start, where do we stop"""
        m_idx_min = m_block * self.tile_m
        if const_expr(self.qhead_per_kvhead_packgqa > 1):
            m_idx_min = m_idx_min // self.qhead_per_kvhead_packgqa
        n_idx = m_idx_min + seqlen_info.seqlen_k - seqlen_info.seqlen_q
        n_idx_right = (
            n_idx
            if const_expr(not self.is_local or self.window_size_right is None)
            else n_idx + self.window_size_right
        )
        return cutlass.max(n_block_min, n_idx_right // self.tile_n)
    @cute.jit
    def get_n_block_min_before_local_mask(
        self,
        seqlen_info: SeqlenInfoQK,
        m_block: Int32,
        n_block_min: Int32,
    ) -> Int32:
        """If we have separate iterations with local masking at the end, where do we stop the non-masked iterations"""
        if const_expr(not self.is_local or self.window_size_left is None):
            return n_block_min
        else:
            m_idx_max = (m_block + 1) * self.tile_m
            if const_expr(self.qhead_per_kvhead_packgqa > 1):
                m_idx_max = cute.ceil_div(m_idx_max, self.qhead_per_kvhead_packgqa)
            n_idx = m_idx_max + seqlen_info.seqlen_k - seqlen_info.seqlen_q
            n_idx_left = n_idx - self.window_size_left
            return cutlass.max(n_block_min, cute.ceil_div(n_idx_left, self.tile_n))
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/block_info.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/pipeline.py | # Copyright (c) 2025, Tri Dao.
# import math
from typing import Optional
from dataclasses import dataclass
import cutlass.cute as cute
from cutlass import Boolean, Int32, const_expr
from cutlass.cutlass_dsl import if_generate, dsl_user_op
from cutlass.pipeline import PipelineState
from cutlass.pipeline import PipelineUserType
from cutlass.pipeline import NamedBarrier as NamedBarrierOg
from cutlass.pipeline import PipelineAsync as PipelineAsyncOg
from cutlass.pipeline import PipelineTmaAsync as PipelineTmaAsyncOg
from cutlass.pipeline import PipelineTmaUmma as PipelineTmaUmmaOg
from cutlass.pipeline import PipelineUmmaAsync as PipelineUmmaAsyncOg
from cutlass.pipeline import PipelineAsyncUmma as PipelineAsyncUmmaOg
class PipelineStateSimple:
    """
    Pipeline state contains an index and phase bit corresponding to the current position in the circular buffer.
    Use a single Int32 to store both the index and phase bit, then we use divmod to get the
    index and phase. If stages is a power of 2, divmod turns into bit twiddling.
    """
    def __init__(self, stages: int, phase_index: Int32):
        # The commented-out variant below restricted stages to powers of two and
        # packed index/phase into separate bit fields; kept for reference.
        # assert stages < 2**16
        # self._log_stages = int(math.log2(stages))
        # assert 1 << self._log_stages == stages, "Number of stages must be a power of 2."
        self._stages = stages        # number of circular-buffer stages (compile-time int)
        self._phase_index = phase_index  # combined counter: index = % stages, phase = // stages
    def clone(self) -> "PipelineStateSimple":
        """Return an independent copy of this state."""
        return PipelineStateSimple(self.stages, self._phase_index)
    @property
    def stages(self) -> int:
        # return 1 << self._log_stages
        return self._stages
    @property
    def index(self) -> Int32:
        """Current buffer slot: phase_index mod stages (constant 0 for a single stage)."""
        # return self._phase_index & 0xFFFF
        # return self._phase_index & ((1 << self._log_stages) - 1)
        if const_expr(self._stages == 1):
            return Int32(0)
        else:
            return self._phase_index % self._stages
    @property
    def phase(self) -> Int32:
        """Phase parity passed to mbarrier waits: phase_index div stages."""
        # return self._phase_index >> 16
        # PTX docs say that the phase parity needs to be 0 or 1, so by right we need to
        # take modulo 2. But in practice just passing the phase in without modulo works fine.
        # return (self._phase_index >> self._log_stages) % 2
        # return self._phase_index >> self._log_stages
        if const_expr(self._stages == 1):
            return self._phase_index
        else:
            return self._phase_index // self._stages
    def advance(self):
        """Move to the next buffer slot (single-stage: just flip the phase bit)."""
        if const_expr(self._stages == 1):
            self._phase_index ^= 1
        else:
            self._phase_index += 1
        # def then_body(phase_index):
        #     # XOR the phase bit and set the index to 0
        #     return (phase_index & 0xFFFF0000) ^ (1 << 16)
        # def else_body(phase_index):
        #     return phase_index
        # self._phase_index = if_generate(
        #     (self._phase_index & 0xFFFF) == self.stages,
        #     then_body,
        #     else_body,
        #     [self._phase_index],
        #     [Int32],
        # )
    def __extract_mlir_values__(self):
        # DSL protocol: expose the dynamic Int32 so the state can cross region boundaries.
        phase_index = self._phase_index
        return [phase_index.ir_value()]
    def __new_from_mlir_values__(self, values):
        # DSL protocol: rebuild the state from the single Int32 IR value.
        return PipelineStateSimple(self.stages, Int32(values[0]))
def make_pipeline_state(type: PipelineUserType, stages: int):
    """
    Create a pipeline state for a producer or consumer.

    Producers are assumed to start with an empty buffer and therefore begin one
    full trip ahead (combined counter = stages, i.e. phase bit flipped to 1);
    consumers begin at slot 0, phase 0.
    """
    assert type in (PipelineUserType.Producer, PipelineUserType.Consumer), (
        "Error: invalid PipelineUserType specified for make_pipeline_state."
    )
    start = stages if type is PipelineUserType.Producer else 0
    return PipelineStateSimple(stages, Int32(start))
@dataclass(frozen=True)
class NamedBarrier(NamedBarrierOg):
    """NamedBarrier extended with *_w_index variants that address barrier
    (barrier_id + index) instead of a single fixed barrier id."""
    @staticmethod
    def create(*args, **kwargs):
        # Build via the base factory, then rebrand the instance as this subclass.
        obj = NamedBarrierOg.create(*args, **kwargs)
        # Can't assign to __class__ directly since the dataclass is frozen
        object.__setattr__(obj, "__class__", NamedBarrier)
        return obj
    @dsl_user_op
    def arrive_w_index(self, index: Int32, *, loc=None, ip=None) -> None:
        """
        The aligned flavor of arrive is used when all threads in the CTA will execute the
        same instruction. See PTX documentation.
        """
        # Non-blocking arrive at barrier (barrier_id + index).
        cute.arch.barrier_arrive(
            barrier_id=self.barrier_id + index,
            number_of_threads=self.num_threads,
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def arrive_and_wait_w_index(self, index: Int32, *, loc=None, ip=None) -> None:
        """Arrive at barrier (barrier_id + index) and block until all
        num_threads participants have arrived."""
        cute.arch.barrier(
            barrier_id=self.barrier_id + index,
            number_of_threads=self.num_threads,
            loc=loc,
            ip=ip,
        )
@dataclass(frozen=True)
class PipelineAsync(PipelineAsyncOg):
    """PipelineAsync extended with *_w_index(_phase) variants that take an
    explicit slot index (and phase) instead of a PipelineState object."""
    @staticmethod
    def create(*args, **kwargs):
        obj = PipelineAsyncOg.create(*args, **kwargs)
        # Can't assign to __class__ directly since the dataclass is frozen
        # obj.__class__ = PipelineAsync
        object.__setattr__(obj, "__class__", PipelineAsync)
        return obj
    @dsl_user_op
    def producer_acquire_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_acquire_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be empty, unless a prior try-acquire already succeeded."""
        if_generate(
            try_acquire_token is None or try_acquire_token == 0,
            lambda: self.sync_object_empty.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def producer_commit_w_index(self, index: Int32, *, loc=None, ip=None):
        """Signal that slot `index` is now full."""
        self.sync_object_full.arrive(index, self.producer_mask, loc=loc, ip=ip)
    @dsl_user_op
    def consumer_wait_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_wait_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be full, unless a prior try-wait already succeeded."""
        if_generate(
            try_wait_token is None or try_wait_token == 0,
            lambda: self.sync_object_full.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def consumer_release_w_index(self, index: Int32, *, loc=None, ip=None):
        """Signal that slot `index` has been consumed and is empty again."""
        self.sync_object_empty.arrive(index, self.consumer_mask, loc=loc, ip=ip)
@dataclass(frozen=True)
class PipelineTmaAsync(PipelineTmaAsyncOg):
    """
    Override producer_acquire to take in extra_tx_count parameter.
    """
    @staticmethod
    def create(*args, **kwargs):
        obj = PipelineTmaAsyncOg.create(*args, **kwargs)
        # Can't assign to __class__ directly since the dataclass is frozen
        object.__setattr__(obj, "__class__", PipelineTmaAsync)
        return obj
    @dsl_user_op
    def producer_acquire(
        self,
        state: PipelineState,
        try_acquire_token: Optional[Boolean] = None,
        extra_tx_count: int = 0,  # extra TMA transaction bytes on top of the configured tx_count
        *,
        loc=None,
        ip=None,
    ):
        """
        TMA producer commit conditionally waits on buffer empty and sets the transaction barrier for leader threadblocks.
        """
        if_generate(
            try_acquire_token is None or try_acquire_token == 0,
            lambda: self.sync_object_empty.wait(state.index, state.phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
        if const_expr(extra_tx_count == 0):
            # Common case: arrive with the preset transaction count.
            self.sync_object_full.arrive(state.index, self.producer_mask, loc=loc, ip=ip)
        else:
            # Augment the expected transaction byte count for this acquire only.
            tx_count = self.sync_object_full.tx_count + extra_tx_count
            self.sync_object_full.arrive_and_expect_tx(state.index, tx_count, loc=loc, ip=ip)
@dataclass(frozen=True)
class PipelineTmaUmma(PipelineTmaUmmaOg):
    """
    Override producer_acquire to take in extra_tx_count parameter.

    Also adds *_w_index(_phase) variants that take an explicit slot index
    (and phase) instead of a PipelineState object.
    """
    @staticmethod
    def create(*args, **kwargs):
        obj = PipelineTmaUmmaOg.create(*args, **kwargs)
        # Can't assign to __class__ directly since the dataclass is frozen
        # obj.__class__ = PipelineTmaUmma
        object.__setattr__(obj, "__class__", PipelineTmaUmma)
        return obj
    @dsl_user_op
    def producer_acquire(
        self,
        state: PipelineState,
        try_acquire_token: Optional[Boolean] = None,
        extra_tx_count: int = 0,  # extra TMA transaction bytes on top of the configured tx_count
        *,
        loc=None,
        ip=None,
    ):
        """
        TMA producer commit conditionally waits on buffer empty and sets the transaction barrier for leader threadblocks.
        """
        if_generate(
            try_acquire_token is None or try_acquire_token == 0,
            lambda: self.sync_object_empty.wait(state.index, state.phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
        if const_expr(extra_tx_count == 0):
            # Only the leader CTA of the (2-CTA) group arrives on the full barrier.
            if_generate(
                self.is_leader_cta,
                lambda: self.sync_object_full.arrive(
                    state.index, self.producer_mask, loc=loc, ip=ip
                ),
                loc=loc,
                ip=ip,
            )
        else:
            # Augment the expected transaction byte count for this acquire only.
            tx_count = self.sync_object_full.tx_count + extra_tx_count
            if_generate(
                self.is_leader_cta,
                lambda: self.sync_object_full.arrive_and_expect_tx(
                    state.index, tx_count, loc=loc, ip=ip
                ),
                loc=loc,
                ip=ip,
            )
    @dsl_user_op
    def producer_acquire_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_acquire_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """
        TMA producer commit conditionally waits on buffer empty and sets the transaction barrier for leader threadblocks.
        """
        if_generate(
            try_acquire_token is None or try_acquire_token == 0,
            lambda: self.sync_object_empty.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
        if_generate(
            self.is_leader_cta,
            lambda: self.sync_object_full.arrive(index, self.producer_mask, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def consumer_wait_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_wait_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be full, unless a prior try-wait already succeeded."""
        if_generate(
            try_wait_token is None or try_wait_token == 0,
            lambda: self.sync_object_full.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def consumer_release_w_index(self, index: Int32, *, loc=None, ip=None):
        """
        UMMA consumer release buffer empty, cta_group needs to be provided.
        """
        self.sync_object_empty.arrive(index, self.consumer_mask, self.cta_group, loc=loc, ip=ip)
@dataclass(frozen=True)
class PipelineUmmaAsync(PipelineUmmaAsyncOg):
    """PipelineUmmaAsync extended with *_w_index(_phase) variants that take an
    explicit slot index (and phase) instead of a PipelineState object."""
    @staticmethod
    def create(*args, **kwargs):
        obj = PipelineUmmaAsyncOg.create(*args, **kwargs)
        # Can't assign to __class__ directly since the dataclass is frozen
        object.__setattr__(obj, "__class__", PipelineUmmaAsync)
        return obj
    @dsl_user_op
    def producer_acquire_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_acquire_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be empty, unless a prior try-acquire already succeeded."""
        if_generate(
            try_acquire_token is None or try_acquire_token == 0,
            lambda: self.sync_object_empty.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def producer_commit_w_index(self, index: Int32, *, loc=None, ip=None):
        """
        UMMA producer commit buffer full, cta_group needs to be provided.
        """
        self.sync_object_full.arrive(index, self.producer_mask, self.cta_group, loc=loc, ip=ip)
    @dsl_user_op
    def consumer_wait_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_wait_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be full, unless a prior try-wait already succeeded."""
        if_generate(
            try_wait_token is None or try_wait_token == 0,
            lambda: self.sync_object_full.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def consumer_release_w_index(self, index: Int32, *, loc=None, ip=None):
        """Signal that slot `index` has been consumed and is empty again."""
        self.sync_object_empty.arrive(index, self.consumer_mask, loc=loc, ip=ip)
@dataclass(frozen=True)
class PipelineAsyncUmma(PipelineAsyncUmmaOg):
    """PipelineAsyncUmma extended with *_w_index(_phase) variants that take an
    explicit slot index (and phase) instead of a PipelineState object."""
    @staticmethod
    def create(*args, **kwargs):
        obj = PipelineAsyncUmmaOg.create(*args, **kwargs)
        # Can't assign to __class__ directly since the dataclass is frozen
        object.__setattr__(obj, "__class__", PipelineAsyncUmma)
        return obj
    @dsl_user_op
    def producer_acquire_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_acquire_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be empty, unless a prior try-acquire already succeeded."""
        if_generate(
            try_acquire_token is None or try_acquire_token == 0,
            lambda: self.sync_object_empty.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def producer_commit_w_index(self, index: Int32, *, loc=None, ip=None):
        """Signal that slot `index` is now full."""
        self.sync_object_full.arrive(index, self.producer_mask, loc=loc, ip=ip)
    @dsl_user_op
    def consumer_wait_w_index_phase(
        self,
        index: Int32,
        phase: Int32,
        try_wait_token: Optional[Boolean] = None,
        *,
        loc=None,
        ip=None,
    ):
        """Wait for slot `index` to be full, unless a prior try-wait already succeeded."""
        if_generate(
            try_wait_token is None or try_wait_token == 0,
            lambda: self.sync_object_full.wait(index, phase, loc=loc, ip=ip),
            loc=loc,
            ip=ip,
        )
    @dsl_user_op
    def consumer_release_w_index(self, index: Int32, *, loc=None, ip=None):
        """
        UMMA consumer release buffer empty, cta_group needs to be provided.
        """
        self.sync_object_empty.arrive(index, self.consumer_mask, self.cta_group, loc=loc, ip=ip)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/pipeline.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/ampere_helpers.py | # Copyright (c) 2025, Tri Dao.
from typing import Type, Callable, Optional
import cutlass
import cutlass.cute as cute
def get_smem_layout_atom(dtype: Type[cutlass.Numeric], k_dim: int) -> cute.ComposedLayout:
    """Build the swizzled shared-memory layout atom for a (rows, k_dim) tile.

    Picks the widest power-of-two row chunk (128/64/32/16 bytes) that divides the
    row size, derives the matching swizzle parameters from it and from the element
    width, and returns a swizzle composed with a column-major ordered layout.
    All arithmetic here happens at trace time.
    """
    dtype_byte = cutlass.const_expr(dtype.width // 8)
    bytes_per_row = cutlass.const_expr(k_dim * dtype_byte)
    # Widest aligned chunk of a row we can address as one smem "block":
    # prefer 128B, then 64B, 32B, and fall back to 16B.
    if bytes_per_row % 128 == 0:
        block_bytes = 128
    elif bytes_per_row % 64 == 0:
        block_bytes = 64
    elif bytes_per_row % 32 == 0:
        block_bytes = 32
    else:
        block_bytes = 16
    smem_k_block_size = cutlass.const_expr(block_bytes) // dtype_byte
    # Number of swizzle bits tracks the block width in elements.
    if smem_k_block_size == 128:
        swizzle_bits = 4
    elif smem_k_block_size == 64:
        swizzle_bits = 3
    elif smem_k_block_size == 32:
        swizzle_bits = 2
    else:
        swizzle_bits = 1
    # Swizzle base depends on the element byte width (4B -> 2, 2B -> 3, else 4).
    if dtype_byte == 4:
        swizzle_base = 2
    elif dtype_byte == 2:
        swizzle_base = 3
    else:
        swizzle_base = 4
    # Atom is 8 rows tall when k_dim is a multiple of 32, otherwise 16.
    atom_rows = 8 if cutlass.const_expr(k_dim % 32 == 0) else 16
    return cute.make_composed_layout(
        cute.make_swizzle(swizzle_bits, swizzle_base, swizzle_base),
        0,
        cute.make_ordered_layout((atom_rows, smem_k_block_size), order=(1, 0)),
    )
@cute.jit
def gemm(
    tiled_mma: cute.TiledMma,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    tCsA: cute.Tensor,
    tCsB: cute.Tensor,
    smem_thr_copy_A: cute.TiledCopy,
    smem_thr_copy_B: cute.TiledCopy,
    hook_fn: Optional[Callable] = None,
    A_in_regs: cutlass.Constexpr[bool] = False,
    B_in_regs: cutlass.Constexpr[bool] = False,
    swap_AB: cutlass.Constexpr[bool] = False,
) -> None:
    """Software-pipelined smem -> register -> MMA gemm, accumulating into `acc`.

    For each k slice, the smem->register copy of slice k+1 is issued before the
    MMA on slice k, so loads overlap compute. `A_in_regs` / `B_in_regs` skip the
    corresponding copies when the operand already lives in registers.
    `swap_AB` recurses once with A/B operands exchanged. `hook_fn`, if given,
    runs right after the first MMA (k == 0).
    """
    if cutlass.const_expr(swap_AB):
        # Re-enter with the operands (and their copies / in-regs flags) swapped.
        gemm(
            tiled_mma,
            acc,
            tCrB,
            tCrA,
            tCsB,
            tCsA,
            smem_thr_copy_B,
            smem_thr_copy_A,
            hook_fn,
            A_in_regs=B_in_regs,
            B_in_regs=A_in_regs,
            swap_AB=False,
        )
    else:
        tCrA_copy_view = smem_thr_copy_A.retile(tCrA)
        tCrB_copy_view = smem_thr_copy_B.retile(tCrB)
        # Prologue: load the k = 0 slices before entering the loop.
        if cutlass.const_expr(not A_in_regs):
            cute.copy(smem_thr_copy_A, tCsA[None, None, 0], tCrA_copy_view[None, None, 0])
        if cutlass.const_expr(not B_in_regs):
            cute.copy(smem_thr_copy_B, tCsB[None, None, 0], tCrB_copy_view[None, None, 0])
        for k in cutlass.range_constexpr(cute.size(tCsA.shape[2])):
            # Prefetch slice k + 1 while the MMA below works on slice k.
            if k < cute.size(tCsA.shape[2]) - 1:
                if cutlass.const_expr(not A_in_regs):
                    cute.copy(
                        smem_thr_copy_A, tCsA[None, None, k + 1], tCrA_copy_view[None, None, k + 1]
                    )
                if cutlass.const_expr(not B_in_regs):
                    cute.copy(
                        smem_thr_copy_B, tCsB[None, None, k + 1], tCrB_copy_view[None, None, k + 1]
                    )
            cute.gemm(tiled_mma, acc, tCrA[None, None, k], tCrB[None, None, k], acc)
            if cutlass.const_expr(k == 0 and hook_fn is not None):
                hook_fn()
@cute.jit
def gemm_rs(
    tiled_mma: cute.TiledMma,
    acc: cute.Tensor,
    tCrA: cute.Tensor,
    tCrB: cute.Tensor,
    tCsB: cute.Tensor,
    smem_thr_copy_B: cute.TiledCopy,
    hook_fn: Optional[Callable] = None,
) -> None:
    """Register-x-smem gemm: A is already in registers, only B is staged from smem.

    Same pipelining as `gemm` — the B copy for slice k+1 is issued ahead of the
    MMA on slice k. `hook_fn`, if given, runs right after the first MMA.
    """
    tCrB_copy_view = smem_thr_copy_B.retile(tCrB)
    # Prologue: load the k = 0 slice of B.
    cute.copy(smem_thr_copy_B, tCsB[None, None, 0], tCrB_copy_view[None, None, 0])
    for k in cutlass.range_constexpr(cute.size(tCrA.shape[2])):
        # Prefetch B slice k + 1 while the MMA below works on slice k.
        if cutlass.const_expr(k < cute.size(tCrA.shape[2]) - 1):
            cute.copy(smem_thr_copy_B, tCsB[None, None, k + 1], tCrB_copy_view[None, None, k + 1])
        cute.gemm(tiled_mma, acc, tCrA[None, None, k], tCrB[None, None, k], acc)
        if cutlass.const_expr(k == 0 and hook_fn is not None):
            hook_fn()
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/ampere_helpers.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/flash_bwd.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
# A reimplementation of https://github.com/Dao-AILab/flash-attention/blob/main/hopper/mainloop_bwd_sm80.hpp
# from Cutlass C++ to Cute-DSL.
import math
from types import SimpleNamespace
from typing import Type, Callable, Optional
from functools import partial
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
from cutlass.cute.nvgpu import cpasync, warp
from cutlass import Float32, Int32
import cutlass.utils as utils_basic
from quack import layout_utils
from flash_attn.cute import ampere_helpers as sm80_utils
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
from flash_attn.cute import utils
from flash_attn.cute.mask import AttentionMask
from flash_attn.cute.seqlen_info import SeqlenInfoQK
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.tile_scheduler import SingleTileScheduler, SingleTileVarlenScheduler, TileSchedulerArguments
class FlashAttentionBackwardSm80:
    def __init__(
        self,
        dtype: Type[cutlass.Numeric],
        head_dim: int,
        head_dim_v: Optional[int] = None,
        qhead_per_kvhead: int = 1,
        m_block_size: int = 64,
        n_block_size: int = 128,
        num_stages_Q: int = 2,
        num_stages_dO: int = 2,
        num_threads: int = 256,
        pack_gqa: bool = False,
        is_causal: bool = False,
        SdP_swapAB: bool = False,
        dKV_swapAB: bool = False,
        dQ_swapAB: bool = False,
        AtomLayoutMSdP: int = 1,
        AtomLayoutNdKV: int = 8,
        AtomLayoutMdQ: int = 1,
        V_in_regs: bool = False,
    ):
        """Configure the SM80 flash-attention backward kernel.

        All contiguous dimensions must be at least 16-byte aligned, which means
        the head dimension should be a multiple of 8.

        :param dtype: element type of Q/K/V/dO (Float16 or BFloat16)
        :param head_dim: head dimension of Q/K
        :param head_dim_v: head dimension of V (defaults to ``head_dim``)
        :param qhead_per_kvhead: query heads per KV head (GQA ratio)
        :param m_block_size: tile size along seqlen_q
        :param n_block_size: tile size along seqlen_k
        :param num_stages_Q: smem pipeline depth for Q tiles
        :param num_stages_dO: smem pipeline depth for dO tiles
        :param num_threads: threads per CTA (multiple of 32)
        :param pack_gqa: pack query heads of one KV head into the same tile
        :param is_causal: apply a causal mask
        :param SdP_swapAB: swap MMA operands for the S/dP gemms
        :param dKV_swapAB: swap MMA operands for the dK/dV gemms
        :param dQ_swapAB: swap MMA operands for the dQ gemm
        :param AtomLayoutMSdP: warp tiling along M for the S/dP gemms
        :param AtomLayoutNdKV: warp tiling along N for the dK/dV gemms
        :param AtomLayoutMdQ: warp tiling along M for the dQ gemm
        :param V_in_regs: keep V in registers (shares Q/V smem)
        """
        self.dtype = dtype
        # Pad head_dim up to a multiple of 32 for the padded k block size.
        hdim_multiple_of = 32
        self.head_dim_padded = int(math.ceil(head_dim / hdim_multiple_of) * hdim_multiple_of)
        head_dim_v = head_dim_v if head_dim_v is not None else head_dim
        self.same_hdim_kv = head_dim == head_dim_v
        self.head_dim_v_padded = int(math.ceil(head_dim_v / hdim_multiple_of) * hdim_multiple_of)
        # Can save registers (and hence be faster) if we don't have to check hdim predication
        self.check_hdim_oob = head_dim != self.head_dim_padded
        self.check_hdim_v_oob = head_dim_v != self.head_dim_v_padded
        self.qhead_per_kvhead = qhead_per_kvhead
        self.m_block_size = m_block_size
        self.n_block_size = n_block_size
        self.num_threads = num_threads
        self.pack_gqa = pack_gqa
        self.is_causal = is_causal
        self.num_stages_Q = num_stages_Q
        self.num_stages_dO = num_stages_dO
        self.SdP_swapAB = SdP_swapAB
        self.dKV_swapAB = dKV_swapAB
        self.dQ_swapAB = dQ_swapAB
        self.AtomLayoutMSdP = AtomLayoutMSdP
        self.AtomLayoutNdKV = AtomLayoutNdKV
        self.AtomLayoutMdQ = AtomLayoutMdQ
        num_mma_warps = self.num_threads // cute.arch.WARP_SIZE
        # dK/dV MMA can read P/dS straight from registers only under this exact
        # warp layout / operand-swap combination.
        self.Mma_dKV_is_RS = AtomLayoutMSdP == 1 and AtomLayoutNdKV == num_mma_warps and SdP_swapAB and not dKV_swapAB
        self.V_in_regs = V_in_regs
        # Keeping V in registers frees its smem tile, so Q and V share storage.
        self.share_QV_smem = V_in_regs
@staticmethod
def can_implement(
dtype, head_dim, head_dim_v, m_block_size, n_block_size, num_stages_Q, num_stages_dO,
num_threads, is_causal,
V_in_regs=False
) -> bool:
"""Check if the kernel can be implemented with the given parameters.
:param dtype: data type
:type dtype: cutlass.Numeric
:param head_dim: head dimension
:type head_dim: int
:param m_block_size: m block size
:type m_block_size: int
:param n_block_size: n block size
:type n_block_size: int
:param num_threads: number of threads
:type num_threads: int
:param is_causal: is causal
:type is_causal: bool
:return: True if the kernel can be implemented, False otherwise
:rtype: bool
"""
if dtype not in [cutlass.Float16, cutlass.BFloat16]:
return False
if head_dim % 8 != 0:
return False
if head_dim_v % 8 != 0:
return False
if n_block_size % 16 != 0:
return False
if num_threads % 32 != 0:
return False
# Check if block size setting is out of shared memory capacity
# Shared memory usage: Q tile + (K tile + V tile) where K and V use the same tile size
smem_usage_Q = m_block_size * head_dim * num_stages_Q * 2
smem_usage_dO = m_block_size * head_dim_v * num_stages_dO * 2
smem_usage_K = n_block_size * head_dim * 2
smem_usage_V = n_block_size * head_dim_v * 2
smem_usage_QV = (smem_usage_Q + smem_usage_V) if not V_in_regs else max(smem_usage_Q, smem_usage_V)
smem_usage = smem_usage_QV + smem_usage_dO + smem_usage_K
smem_capacity = utils_basic.get_smem_capacity_in_bytes("sm_80")
if smem_usage > smem_capacity:
return False
return True
    def _check_type(
        self,
        mQ_type: Type[cutlass.Numeric],
        mK_type: Type[cutlass.Numeric],
        mV_type: Type[cutlass.Numeric],
        mdO_type: Type[cutlass.Numeric],
        mLSE_type: Type[cutlass.Numeric],
        mdPsum_type: Type[cutlass.Numeric],
        mdQaccum_type: Type[cutlass.Numeric],
        mdK_type: Type[cutlass.Numeric],
        mdV_type: Type[cutlass.Numeric],
        mCuSeqlensQ_type: Type[cutlass.Numeric] | None,
        mCuSeqlensK_type: Type[cutlass.Numeric] | None,
        mSeqUsedQ_type: Type[cutlass.Numeric] | None,
        mSeqUsedK_type: Type[cutlass.Numeric] | None,
    ):
        """Validate (at trace time) the element types of all operand tensors.

        Raises TypeError on any mismatch. Q/K/V/dO must share one fp16/bf16
        type; dK/dV match Q for MHA but must be Float32 accumulators for GQA;
        LSE/dPsum/dQaccum are Float32; all varlen metadata tensors are Int32
        (or absent).
        """
        if cutlass.const_expr(not (mQ_type == mK_type == mV_type == mdO_type)):
            raise TypeError("All tensors must have the same data type")
        if cutlass.const_expr(self.qhead_per_kvhead == 1):
            if cutlass.const_expr(not (mdK_type == mdV_type == mQ_type)):
                raise TypeError("mdK and mdV tensors must have the same data type as mQ")
        else:
            # GQA: dK/dV are accumulated across query heads, hence Float32.
            if cutlass.const_expr(not (mdK_type == mdV_type == cutlass.Float32)):
                raise TypeError("mdKaccum and mdVaccum tensors must have the data type Float32")
        if cutlass.const_expr(not mQ_type in [cutlass.Float16, cutlass.BFloat16]):
            raise TypeError("Only Float16 or BFloat16 is supported")
        if cutlass.const_expr(not mLSE_type in [cutlass.Float32]):
            raise TypeError("LSE tensor must be Float32")
        if cutlass.const_expr(not mdPsum_type in [cutlass.Float32]):
            raise TypeError("dPsum tensor must be Float32")
        if cutlass.const_expr(not mdQaccum_type in [cutlass.Float32]):
            raise TypeError("dQaccum tensor must be Float32")
        if cutlass.const_expr(mCuSeqlensQ_type not in [None, cutlass.Int32]):
            raise TypeError("cuSeqlensQ tensor must be Int32")
        if cutlass.const_expr(mCuSeqlensK_type not in [None, cutlass.Int32]):
            raise TypeError("cuSeqlensK tensor must be Int32")
        if cutlass.const_expr(mSeqUsedQ_type not in [None, cutlass.Int32]):
            raise TypeError("SeqUsedQ tensor must be Int32")
        if cutlass.const_expr(mSeqUsedK_type not in [None, cutlass.Int32]):
            raise TypeError("SeqUsedK tensor must be Int32")
        assert mQ_type == self.dtype
def _setup_attributes(self):
# ///////////////////////////////////////////////////////////////////////////////
# Shared memory layout: Q/K/V
# ///////////////////////////////////////////////////////////////////////////////
sQ_layout_atom = sm80_utils.get_smem_layout_atom(self.dtype, self.head_dim_padded)
self.sQ_layout = cute.tile_to_shape(
sQ_layout_atom, (self.m_block_size, self.head_dim_padded, self.num_stages_Q), (0, 1, 2),
)
sK_layout_atom = sQ_layout_atom
self.sK_layout = cute.tile_to_shape(
sK_layout_atom, (self.n_block_size, self.head_dim_padded), (0, 1),
)
sV_layout_atom = sm80_utils.get_smem_layout_atom(self.dtype, self.head_dim_v_padded)
self.sV_layout = cute.tile_to_shape(
sV_layout_atom, (self.n_block_size, self.head_dim_v_padded), (0, 1),
)
sdO_layout_atom = sV_layout_atom
self.sdO_layout = cute.tile_to_shape(
sdO_layout_atom, (self.m_block_size, self.head_dim_v_padded, self.num_stages_dO), (0, 1, 2),
)
# TODO: do we set swizzle to be 3 here explicitly?
sPdS_layout_atom = sm80_utils.get_smem_layout_atom(self.dtype, self.n_block_size)
self.sPdS_layout = cute.tile_to_shape(
sPdS_layout_atom, (self.m_block_size, self.n_block_size), (0, 1),
)
# We set stride to be multiple of 64 so that if ShuffleLSE, even if threads read from sLSE but out of bounds,
# it's still a valid smem address.
self.sLSE_layout = cute.make_layout(
(self.m_block_size, self.num_stages_Q),
stride=(1, cute.round_up(self.m_block_size, 64)),
)
sLSEMma_layout = cute.make_layout(
(self.m_block_size, self.n_block_size, self.num_stages_Q),
stride=(1, 0, cute.round_up(self.m_block_size, 64)),
)
sLSEMma_layout_transposed = cute.make_layout(
(self.n_block_size, self.m_block_size, self.num_stages_Q),
stride=(0, 1, cute.round_up(self.m_block_size, 64)),
)
self.sLSEMma_layout = sLSEMma_layout if not self.SdP_swapAB else sLSEMma_layout_transposed
# ///////////////////////////////////////////////////////////////////////////////
# GMEM Tiled copy:
# ///////////////////////////////////////////////////////////////////////////////
# Thread layouts for copies
universal_copy_bits = 128
async_copy_elems = universal_copy_bits // self.dtype.width
# atom_async_copy: async copy atom for QKV load
atom_async_copy = cute.make_copy_atom(
cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.GLOBAL),
self.dtype,
num_bits_per_copy=universal_copy_bits,
)
# atom_universal_copy: universal copy atom for O store
atom_universal_copy = cute.make_copy_atom(
cute.nvgpu.CopyUniversalOp(), self.dtype, num_bits_per_copy=universal_copy_bits,
)
# tQK_layout: thread layout for QK load
tQK_shape_dim_1 = sQ_layout_atom.outer.shape[1] // async_copy_elems
assert self.num_threads % tQK_shape_dim_1 == 0, "num_threads must be divisible by tQK_shape_dim_1"
tQK_layout = cute.make_ordered_layout(
(self.num_threads // tQK_shape_dim_1, tQK_shape_dim_1), order=(1, 0),
)
# Do we need to check if we overshot kBlockM when we load Q?
self.is_even_m_smem_q = self.m_block_size % tQK_layout.shape[0] == 0
# Do we need to check if we overshot kBlockN when we load K?
self.is_even_n_smem_k = self.n_block_size % tQK_layout.shape[0] == 0
tVdO_shape_dim_1 = sV_layout_atom.outer.shape[1] // async_copy_elems
assert self.num_threads % tVdO_shape_dim_1 == 0, "num_threads must be divisible by tVdO_shape_dim_1"
tVdO_layout = cute.make_ordered_layout(
(self.num_threads // tVdO_shape_dim_1, tVdO_shape_dim_1), order=(1, 0),
)
# Do we need to check if we overshot kBlockN when we load V?
self.is_even_n_smem_v = self.n_block_size % tVdO_layout.shape[0] == 0
self.is_even_m_smem_do = self.m_block_size % tVdO_layout.shape[0] == 0
# Value layouts for copies
vQKVdO_layout = cute.make_layout((1, async_copy_elems))
# gmem_tiled_copy_QK: tiled copy for QK load
self.gmem_tiled_copy_QK = cute.make_tiled_copy_tv(atom_async_copy, tQK_layout, vQKVdO_layout)
self.gmem_tiled_copy_VdO = cute.make_tiled_copy_tv(atom_async_copy, tVdO_layout, vQKVdO_layout)
self.gmem_tiled_copy_dK = cute.make_tiled_copy_tv(atom_universal_copy, tQK_layout, vQKVdO_layout)
self.gmem_tiled_copy_dV = cute.make_tiled_copy_tv(atom_universal_copy, tVdO_layout, vQKVdO_layout)
async_copy_elems_accum = universal_copy_bits // cutlass.Float32.width
# I think we wouldn't require this with smarter padding
if cutlass.const_expr(not self.varlen_q):
async_copy_elems_accum = universal_copy_bits // cutlass.Float32.width
atom_async_copy_accum = cute.make_copy_atom(
cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.GLOBAL),
cutlass.Float32,
num_bits_per_copy=universal_copy_bits,
)
else:
async_copy_elems_accum = 1
atom_async_copy_accum = cute.make_copy_atom(
cute.nvgpu.CopyUniversalOp(),
cutlass.Float32,
num_bits_per_copy=cutlass.Float32.width,
)
self.gmem_tiled_copy_LSE = cute.make_tiled_copy_tv(
atom_async_copy_accum,
cute.make_layout(self.num_threads),
cute.make_layout(async_copy_elems_accum),
)
self.gmem_tiled_copy_dQaccum = cute.make_tiled_copy_tv(
cute.make_copy_atom(
cute.nvgpu.CopyUniversalOp(), cutlass.Float32, num_bits_per_copy=cutlass.Float32.width
),
cute.make_layout(self.num_threads),
cute.make_layout(1)
)
if cutlass.const_expr(self.qhead_per_kvhead > 1):
self.gmem_tiled_copy_dK = self.gmem_tiled_copy_dQaccum
self.gmem_tiled_copy_dV = self.gmem_tiled_copy_dQaccum
def _get_tiled_mma(self):
num_mma_warps = self.num_threads // 32
AtomLayoutSdP = (self.AtomLayoutMSdP, num_mma_warps // self.AtomLayoutMSdP, 1) if cutlass.const_expr(not self.SdP_swapAB) else (num_mma_warps // self.AtomLayoutMSdP, self.AtomLayoutMSdP, 1)
tiled_mma_sdp = cute.make_tiled_mma(
warp.MmaF16BF16Op(self.dtype, cutlass.Float32, (16, 8, 16)),
AtomLayoutSdP,
permutation_mnk=(AtomLayoutSdP[0] * 16, AtomLayoutSdP[1] * 16, 16),
)
AtomLayoutdKV = (self.AtomLayoutNdKV, num_mma_warps // self.AtomLayoutNdKV, 1) if cutlass.const_expr(not self.dKV_swapAB) else (num_mma_warps // self.AtomLayoutNdKV, self.AtomLayoutNdKV, 1)
tiled_mma_dkv = cute.make_tiled_mma(
warp.MmaF16BF16Op(self.dtype, cutlass.Float32, (16, 8, 16)),
AtomLayoutdKV,
permutation_mnk=(AtomLayoutdKV[0] * 16, AtomLayoutdKV[1] * 16, 16),
)
AtomLayoutdQ = (self.AtomLayoutMdQ, num_mma_warps // self.AtomLayoutMdQ, 1) if cutlass.const_expr(not self.dQ_swapAB) else (num_mma_warps // self.AtomLayoutMdQ, self.AtomLayoutMdQ, 1)
tiled_mma_dq = cute.make_tiled_mma(
warp.MmaF16BF16Op(self.dtype, cutlass.Float32, (16, 8, 16)),
AtomLayoutdQ,
permutation_mnk=(AtomLayoutdQ[0] * 16, AtomLayoutdQ[1] * 16, 16),
)
return tiled_mma_sdp, tiled_mma_dkv, tiled_mma_dq
    def _get_shared_storage_cls(self):
        """Build the @cute.struct describing this kernel's shared-memory arena.

        Returns one of two struct classes: the default keeps separate Q and V
        buffers; when ``share_QV_smem`` (i.e. V_in_regs) is set, Q and V share
        a single buffer sized to the larger of the two.
        """
        # 1024-byte aligned buffers for the main operand tiles.
        sQ_struct, sK_struct, sV_struct, sdO_struct = [
            cute.struct.Align[cute.struct.MemRange[self.dtype, cute.cosize(layout)], 1024]
            for layout in (self.sQ_layout, self.sK_layout, self.sV_layout, self.sdO_layout)
        ]
        # Shared Q/V buffer must hold whichever of the two is larger.
        cosize_sQV = max(cute.cosize(self.sQ_layout), cute.cosize(self.sV_layout))
        sQV_struct = cute.struct.Align[cute.struct.MemRange[self.dtype, cosize_sQV], 1024]
        # Float32 side buffers for LSE and dPsum (both use sLSE_layout).
        sLSE_struct, sdPsum_struct = [
            cute.struct.Align[cute.struct.MemRange[cutlass.Float32, cute.cosize(layout)], 128]
            for layout in (self.sLSE_layout, self.sLSE_layout)
        ]
        # P and dS staging buffers share the sPdS layout.
        sP_struct, sdS_struct = [
            cute.struct.Align[cute.struct.MemRange[self.dtype, cute.cosize(layout)], 128]
            for layout in (self.sPdS_layout, self.sPdS_layout)
        ]
        @cute.struct
        class SharedStorageSeparateQV:
            sK: sK_struct
            sV: sV_struct
            sQ: sQ_struct
            sdO: sdO_struct
            sLSE: sLSE_struct
            sdPsum: sdPsum_struct
            sP: sP_struct
            sdS: sdS_struct
        # TODO: the case where there's no sP
        @cute.struct
        class SharedStorageSharedQV:
            sK: sK_struct
            sV: sV_struct
            sQ: sQV_struct
            sdO: sdO_struct
            sLSE: sLSE_struct
            sdPsum: sdPsum_struct
            sP: sP_struct
            sdS: sdS_struct
        return SharedStorageSeparateQV if cutlass.const_expr(not self.share_QV_smem) else SharedStorageSharedQV
    @cute.jit
    def __call__(
        self,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        mdO: cute.Tensor,
        mLSE: cute.Tensor,
        mdPsum: cute.Tensor,
        mdQaccum: cute.Tensor,
        mdK: cute.Tensor,
        mdV: cute.Tensor,
        softmax_scale: cutlass.Float32,
        stream: cuda.CUstream,
        mCuSeqlensQ: Optional[cute.Tensor] = None,
        mCuSeqlensK: Optional[cute.Tensor] = None,
        mSeqUsedQ: Optional[cute.Tensor] = None,
        mSeqUsedK: Optional[cute.Tensor] = None,
        softcap: Float32 | float | None = None,
        window_size_left: Int32 | int | None = None,
        window_size_right: Int32 | int | None = None,
        mdQ_semaphore: Optional[cute.Tensor] = None,
    ):
        """Host-side entry point: validate, configure, and launch the bwd kernel.

        Checks tensor element types, derives smem layouts / tiled copies /
        tiled MMAs, sets up the tile scheduler (blocks iterate over seqlen_k
        n-blocks), and launches ``self.kernel`` on ``stream``.
        NOTE(review): softcap and window_size_left/right are accepted here but
        not forwarded to the kernel below — presumably not supported yet;
        confirm against callers.
        """
        assert mdQ_semaphore is None, "semaphore not supported yet"
        # Get the data type and check if it is fp16 or bf16
        self._check_type(*(t.element_type if t is not None else None
                           for t in (mQ, mK, mV, mdO, mLSE, mdPsum, mdQaccum, mdK, mdV, mCuSeqlensQ, mCuSeqlensK, mSeqUsedQ, mSeqUsedK)))
        mQ, mK, mV, mdO, mLSE, mdPsum, mdQaccum, mdK, mdV = [
            assume_tensor_aligned(t) for t in (mQ, mK, mV, mdO, mLSE, mdPsum, mdQaccum, mdK, mdV)
        ]
        # Must be set before _setup_attributes: the LSE copy atom depends on it.
        self.varlen_q = (mCuSeqlensQ is not None)
        self._setup_attributes()
        SharedStorage = self._get_shared_storage_cls()
        tiled_mma_sdp, tiled_mma_dkv, tiled_mma_dq = self._get_tiled_mma()
        # Varlen layouts drop the batch dim, shifting the head axis to index 1.
        num_head = mQ.shape[1] if cutlass.const_expr(mCuSeqlensQ is not None) else mQ.shape[2]
        if cutlass.const_expr(mCuSeqlensK is not None):
            TileScheduler = SingleTileVarlenScheduler
            num_batch = mCuSeqlensK.shape[0] - 1
        else:
            TileScheduler = SingleTileScheduler
            num_batch = mK.shape[0]
        # Uses seqlen k, etc. since main bwd kernel's blocks are over n
        tile_sched_args = TileSchedulerArguments(
            num_block=cute.ceil_div(mK.shape[1], self.n_block_size),
            num_head=num_head,
            num_batch=num_batch,
            num_splits=1,
            seqlen_k=0,
            headdim=mK.shape[2],
            headdim_v=mV.shape[2],
            total_q=mK.shape[0],
            tile_shape_mn=(self.n_block_size, self.m_block_size),
            qhead_per_kvhead_packgqa=self.qhead_per_kvhead if cutlass.const_expr(self.pack_gqa) else 1,
            mCuSeqlensQ=mCuSeqlensK,
            mSeqUsedQ=mSeqUsedK,
        )
        tile_sched_params = TileScheduler.to_underlying_arguments(tile_sched_args)
        grid_dim = TileScheduler.get_grid_shape(tile_sched_params)
        # Pre-scale so the kernel can use exp2 instead of exp.
        softmax_scale_log2 = softmax_scale * math.log2(math.e)
        self.kernel(
            mQ,
            mK,
            mV,
            mdO,
            mLSE,
            mdPsum,
            mdQaccum,
            mdK,
            mdV,
            mCuSeqlensQ,
            mCuSeqlensK,
            mSeqUsedQ,
            mSeqUsedK,
            softmax_scale,
            softmax_scale_log2,
            self.sQ_layout,
            self.sK_layout,
            self.sV_layout,
            self.sdO_layout,
            self.sPdS_layout,
            self.sLSE_layout,
            self.sLSEMma_layout,
            self.gmem_tiled_copy_QK,
            self.gmem_tiled_copy_VdO,
            self.gmem_tiled_copy_dK,
            self.gmem_tiled_copy_dV,
            self.gmem_tiled_copy_LSE,
            self.gmem_tiled_copy_dQaccum,
            tiled_mma_sdp,
            tiled_mma_dkv,
            tiled_mma_dq,
            SharedStorage,
            tile_sched_params,
            TileScheduler,
        ).launch(
            grid=grid_dim,
            block=[self.num_threads, 1, 1],
            smem=SharedStorage.size_in_bytes(),
            stream=stream,
        )
@cute.kernel
def kernel(
self,
mQ: cute.Tensor,
mK: cute.Tensor,
mV: cute.Tensor,
mdO: cute.Tensor,
mLSE: cute.Tensor,
mdPsum: cute.Tensor,
mdQaccum: cute.Tensor,
mdK: cute.Tensor,
mdV: cute.Tensor,
mCuSeqlensQ: Optional[cute.Tensor],
mCuSeqlensK: Optional[cute.Tensor],
mSeqUsedQ: Optional[cute.Tensor],
mSeqUsedK: Optional[cute.Tensor],
softmax_scale: cutlass.Float32,
softmax_scale_log2: cutlass.Float32,
sQ_layout: cute.ComposedLayout,
sK_layout: cute.ComposedLayout,
sV_layout: cute.ComposedLayout,
sdO_layout: cute.ComposedLayout,
sPdS_layout: cute.ComposedLayout,
sLSE_layout: cute.Layout,
sLSEMma_layout: cute.Layout,
gmem_tiled_copy_QK: cute.TiledCopy,
gmem_tiled_copy_VdO: cute.TiledCopy,
gmem_tiled_copy_dK: cute.TiledCopy,
gmem_tiled_copy_dV: cute.TiledCopy,
gmem_tiled_copy_LSE: cute.TiledCopy,
gmem_tiled_copy_dQaccum: cute.TiledCopy,
tiled_mma_sdp: cute.TiledMma,
tiled_mma_dkv: cute.TiledMma,
tiled_mma_dq: cute.TiledMma,
SharedStorage: cutlass.Constexpr,
tile_sched_params: ParamsBase,
TileScheduler: cutlass.Constexpr[Callable],
):
# Thread index, block index
tidx, _, _ = cute.arch.thread_idx()
tile_scheduler = TileScheduler.create(tile_sched_params)
work_tile = tile_scheduler.initial_work_tile_info()
n_block, head_idx, batch_idx, _ = work_tile.tile_idx
if work_tile.is_valid_tile:
seqlen = SeqlenInfoQK.create(batch_idx, mQ.shape[1], mK.shape[1], mCuSeqlensQ=mCuSeqlensQ, mCuSeqlensK=mCuSeqlensK, mSeqUsedQ=mSeqUsedQ, mSeqUsedK=mSeqUsedK)
m_block_max = cute.ceil_div(seqlen.seqlen_q, self.m_block_size)
m_block_min = 0
if cutlass.const_expr(self.is_causal):
m_block_min = max(
(n_block * self.n_block_size + seqlen.seqlen_q - seqlen.seqlen_k) // self.m_block_size,
m_block_min,
)
# TODO: return early if m_block_max == 0
# ///////////////////////////////////////////////////////////////////////////////
# Get the appropriate tiles for this thread block.
# ///////////////////////////////////////////////////////////////////////////////
blkQ_shape = (self.m_block_size, self.head_dim_padded)
blkK_shape = (self.n_block_size, self.head_dim_padded)
blkV_shape = (self.n_block_size, self.head_dim_v_padded)
blkdO_shape = (self.m_block_size, self.head_dim_v_padded)
if cutlass.const_expr(not seqlen.has_cu_seqlens_q):
mQ_cur = mQ[batch_idx, None, head_idx, None]
mLSE_cur = mLSE[batch_idx, head_idx, None]
mdO_cur = mdO[batch_idx, None, head_idx, None]
mdPsum_cur = mdPsum[batch_idx, head_idx, None]
mdQaccum_cur = mdQaccum[batch_idx, head_idx, None]
else:
padded_offset_q = seqlen.offset_q + batch_idx * self.m_block_size
mQ_cur = cute.domain_offset((seqlen.offset_q, 0), mQ[None, head_idx, None])
mLSE_cur = cute.domain_offset((padded_offset_q,), mLSE[head_idx, None])
mdO_cur = cute.domain_offset((seqlen.offset_q, 0), mdO[None, head_idx, None])
mdPsum_cur = cute.domain_offset((padded_offset_q,), mdPsum[head_idx, None])
mdQaccum_cur = cute.domain_offset((padded_offset_q * self.head_dim_padded,), mdQaccum[head_idx, None])
head_idx_kv = head_idx // self.qhead_per_kvhead if cutlass.const_expr(not self.pack_gqa) else head_idx
if cutlass.const_expr(not seqlen.has_cu_seqlens_k):
mK_cur, mV_cur = [t[batch_idx, None, head_idx_kv, None] for t in (mK, mV)]
else:
mK_cur, mV_cur = [cute.domain_offset((seqlen.offset_k, 0), t[None, head_idx_kv, None]) for t in (mK, mV)]
# (m_block_size, head_dim, m_block)
gQ = cute.local_tile(mQ_cur, blkQ_shape, (None, 0))
# (n_block_size, head_dim)
gK = cute.local_tile(mK_cur, blkK_shape, (n_block, 0))
# (n_block_size, head_dim_v)
gV = cute.local_tile(mV_cur, blkV_shape, (n_block, 0))
# (m_block_size, head_dim_v, m_block)
gdO = cute.local_tile(mdO_cur, blkdO_shape, (None, 0))
gLSE = cute.local_tile(mLSE_cur, (self.m_block_size,), (None,))
gdPsum = cute.local_tile(mdPsum_cur, (self.m_block_size,), (None,))
gdQaccum = cute.local_tile(mdQaccum_cur, (self.m_block_size * self.head_dim_padded,), (None,))
# ///////////////////////////////////////////////////////////////////////////////
# Get shared memory buffer
# ///////////////////////////////////////////////////////////////////////////////
smem = cutlass.utils.SmemAllocator()
storage = smem.allocate(SharedStorage)
sQ = storage.sQ.get_tensor(sQ_layout)
sK = storage.sK.get_tensor(sK_layout)
if cutlass.const_expr(not self.share_QV_smem):
sV = storage.sV.get_tensor(sV_layout)
else:
sV = cute.make_tensor(cute.recast_ptr(sQ.iterator, dtype=self.dtype), sV_layout)
sdO = storage.sdO.get_tensor(sdO_layout)
sP = storage.sP.get_tensor(sPdS_layout)
sdS = storage.sdS.get_tensor(sPdS_layout)
sLSE = storage.sLSE.get_tensor(sLSE_layout)
sdPsum = storage.sdPsum.get_tensor(sLSE_layout)
sLSEMma = storage.sLSE.get_tensor(sLSEMma_layout)
sdPsumMma = storage.sdPsum.get_tensor(sLSEMma_layout)
# Transpose view of tensors for tiled mma
sQt, sdOt, sKt, sPt, sdSt = [layout_utils.transpose_view(t) for t in (sQ, sdO, sK, sP, sdS)]
gmem_thr_copy_QK = gmem_tiled_copy_QK.get_slice(tidx)
gmem_thr_copy_VdO = gmem_tiled_copy_VdO.get_slice(tidx)
gmem_thr_copy_lse = gmem_tiled_copy_LSE.get_slice(tidx)
gmem_thr_copy_dQaccum = gmem_tiled_copy_dQaccum.get_slice(tidx)
# (CPY_Atom, CPY_M, CPY_K, m_block)
tQgQ = gmem_thr_copy_QK.partition_S(gQ)
tQsQ = gmem_thr_copy_QK.partition_D(sQ)
# (CPY_Atom, CPY_N, CPY_K)
tKgK = gmem_thr_copy_QK.partition_S(gK)
tKsK = gmem_thr_copy_QK.partition_D(sK)
# (CPY_Atom, CPY_N, CPY_K)
tVgV = gmem_thr_copy_VdO.partition_S(gV)
tVsV = gmem_thr_copy_VdO.partition_D(sV)
# (CPY_Atom, CPY_M, CPY_K, m_block)
tdOgdO = gmem_thr_copy_VdO.partition_S(gdO)
tdOsdO = gmem_thr_copy_VdO.partition_D(sdO)
tLSEgLSE = gmem_thr_copy_lse.partition_S(gLSE)
tLSEsLSE = gmem_thr_copy_lse.partition_D(sLSE)
tLSEgdPsum = gmem_thr_copy_lse.partition_S(gdPsum)
tLSEsdPsum = gmem_thr_copy_lse.partition_D(sdPsum)
tdQgdQaccum = gmem_thr_copy_dQaccum.partition_S(gdQaccum)
# ///////////////////////////////////////////////////////////////////////////////
# Tile MMA compute thread partitions and allocate accumulators
# ///////////////////////////////////////////////////////////////////////////////
thr_mma_sdp = tiled_mma_sdp.get_slice(tidx)
thr_mma_dkv = tiled_mma_dkv.get_slice(tidx)
thr_mma_dq = tiled_mma_dq.get_slice(tidx)
acc_shape_dK = thr_mma_dkv.partition_shape_C((self.n_block_size, self.head_dim_padded))
acc_shape_dV = thr_mma_dkv.partition_shape_C((self.n_block_size, self.head_dim_v_padded))
acc_dK = cute.make_fragment(acc_shape_dK, cutlass.Float32)
acc_dV = cute.make_fragment(acc_shape_dV, cutlass.Float32)
acc_dK.fill(0.0)
acc_dV.fill(0.0)
tSrQ = utils.mma_make_fragment_A(sQ[None, None, 0], thr_mma_sdp, swapAB=self.SdP_swapAB)
tSrK = utils.mma_make_fragment_B(sK, thr_mma_sdp, swapAB=self.SdP_swapAB)
tdPrdO = utils.mma_make_fragment_A(sdO[None, None, 0], thr_mma_sdp, swapAB=self.SdP_swapAB)
tdPrV = utils.mma_make_fragment_B(sV, thr_mma_sdp, swapAB=self.SdP_swapAB)
tdVrP = utils.mma_make_fragment_A(sPt, thr_mma_dkv, swapAB=self.dKV_swapAB)
tdVrdO = utils.mma_make_fragment_B(sdOt[None, None, 0], thr_mma_dkv, swapAB=self.dKV_swapAB)
tdKrdS = utils.mma_make_fragment_A(sdSt, thr_mma_dkv, swapAB=self.dKV_swapAB)
tdKrQ = utils.mma_make_fragment_B(sQt[None, None, 0], thr_mma_dkv, swapAB=self.dKV_swapAB)
tdQrdS = utils.mma_make_fragment_A(sdS, thr_mma_dq, swapAB=self.dQ_swapAB)
tdQrK = utils.mma_make_fragment_B(sKt, thr_mma_dq, swapAB=self.dQ_swapAB)
LSEslice = (None, 0, None) if cutlass.const_expr(not self.SdP_swapAB) else (0, None, None)
tSsLSEMma = layout_utils.reshape_acc_to_mn(thr_mma_sdp.partition_C(sLSEMma))[LSEslice]
tSsdPsumMma = layout_utils.reshape_acc_to_mn(thr_mma_sdp.partition_C(sdPsumMma))[LSEslice]
# ///////////////////////////////////////////////////////////////////////////////
# Smem copy atom tiling
# ///////////////////////////////////////////////////////////////////////////////
smem_copy_atom = cute.make_copy_atom(
warp.LdMatrix8x8x16bOp(transpose=False, num_matrices=4), self.dtype,
)
smem_copy_atom_transposed = cute.make_copy_atom(
warp.LdMatrix8x8x16bOp(transpose=True, num_matrices=4), self.dtype,
)
smem_thr_copy_QdO = utils.make_tiled_copy_A(
smem_copy_atom, tiled_mma_sdp, swapAB=self.SdP_swapAB
).get_slice(tidx)
smem_thr_copy_KV = utils.make_tiled_copy_B(
smem_copy_atom, tiled_mma_sdp, swapAB=self.SdP_swapAB
).get_slice(tidx)
# TODO: should this be smem_copy_atom_transposed?
smem_thr_copy_PdSt = utils.make_tiled_copy_A(
smem_copy_atom_transposed, tiled_mma_dkv, swapAB=self.dKV_swapAB
).get_slice(tidx)
smem_thr_copy_QdOt = utils.make_tiled_copy_B(
smem_copy_atom_transposed, tiled_mma_dkv, swapAB=self.dKV_swapAB
).get_slice(tidx)
smem_thr_copy_dS = utils.make_tiled_copy_A(
smem_copy_atom, tiled_mma_dq, swapAB=self.dQ_swapAB
).get_slice(tidx)
smem_thr_copy_Kt = utils.make_tiled_copy_B(
smem_copy_atom_transposed, tiled_mma_dq, swapAB=self.dQ_swapAB
).get_slice(tidx)
# TODO: what's the number of bits? What if SdP_swapAB
r2s_thr_copy_PdS = cute.make_tiled_copy_C(
cute.make_copy_atom(
cute.nvgpu.CopyUniversalOp(), self.dtype, num_bits_per_copy=2 * self.dtype.width
),
tiled_mma_sdp,
).get_slice(tidx)
tSsQ = smem_thr_copy_QdO.partition_S(sQ)
tdPsdO = smem_thr_copy_QdO.partition_S(sdO)
tSsK = smem_thr_copy_KV.partition_S(sK)
tdPsV = smem_thr_copy_KV.partition_S(sV)
tdVsPt = smem_thr_copy_PdSt.partition_S(sPt)
tdKsdSt = smem_thr_copy_PdSt.partition_S(sdSt)
tdVsdOt = smem_thr_copy_QdOt.partition_S(sdOt)
tdKsQt = smem_thr_copy_QdOt.partition_S(sQt)
tdQsdS = smem_thr_copy_dS.partition_S(sdS)
tdQsKt = smem_thr_copy_Kt.partition_S(sKt)
tPsP = r2s_thr_copy_PdS.partition_D(sP)
tdSsdS = r2s_thr_copy_PdS.partition_D(sdS)
# ///////////////////////////////////////////////////////////////////////////////
# Predicate: Mark indices that need to copy when problem_shape isn't a multiple
# of tile_shape
# ///////////////////////////////////////////////////////////////////////////////
# Construct identity layout for KV
cQ = cute.make_identity_tensor((self.m_block_size, self.head_dim_padded))
tQcQ = gmem_thr_copy_QK.partition_S(cQ)
t0QcQ = gmem_thr_copy_QK.get_slice(0).partition_S(cQ)
if cutlass.const_expr(self.head_dim_padded == self.head_dim_v_padded):
tdOcdO = tQcQ
t0dOcdO = t0QcQ
else:
cdO = cute.make_identity_tensor((self.m_block_size, self.head_dim_v_padded))
tdOcdO = gmem_thr_copy_VdO.partition_S(cdO)
t0dOcdO = gmem_thr_copy_VdO.get_slice(0).partition_S(cdO)
cLSE = cute.make_identity_tensor((self.m_block_size,))
tLSEcLSE = gmem_thr_copy_lse.partition_S(cLSE)
# Allocate predicate tensors for m and n, here we only allocate the tile of k, and
# use "if" on the mn dimension.
# This is to reduce register pressure and gets 2-3% performance gain.
d_head = mQ.shape[cute.rank(mQ) - 1]
d_head_v = mdO.shape[cute.rank(mdO) - 1]
tQpQ = utils.predicate_k(tQcQ, limit=d_head)
if cutlass.const_expr(self.same_hdim_kv):
tdOpdO = tQpQ
else:
tdOpdO = utils.predicate_k(tdOcdO, limit=d_head_v)
# group parameters for compute_one_m_block
mma_params = SimpleNamespace(
thr_mma_sdp=thr_mma_sdp, thr_mma_dkv=thr_mma_dkv, thr_mma_dq=thr_mma_dq,
tSrQ=tSrQ, tSrK=tSrK, tdPrdO=tdPrdO, tdPrV=tdPrV,
tdVrP=tdVrP, tdVrdO=tdVrdO, tdKrdS=tdKrdS, tdKrQ=tdKrQ,
tdQrdS=tdQrdS, tdQrK=tdQrK,
acc_dK=acc_dK, acc_dV=acc_dV,
)
smem_copy_params = SimpleNamespace(
smem_thr_copy_QdO=smem_thr_copy_QdO,
smem_thr_copy_KV=smem_thr_copy_KV,
smem_thr_copy_PdSt=smem_thr_copy_PdSt,
smem_thr_copy_QdOt=smem_thr_copy_QdOt,
smem_thr_copy_dS=smem_thr_copy_dS,
smem_thr_copy_Kt=smem_thr_copy_Kt,
r2s_thr_copy_PdS=r2s_thr_copy_PdS,
tSsQ=tSsQ, tSsK=tSsK, tdPsdO=tdPsdO, tdPsV=tdPsV,
tSsLSEMma=tSsLSEMma, tSsdPsumMma=tSsdPsumMma,
tPsP=tPsP, tdSsdS=tdSsdS,
tdVsPt=tdVsPt, tdVsdOt=tdVsdOt, tdKsdSt=tdKsdSt, tdKsQt=tdKsQt,
tdQsdS=tdQsdS, tdQsKt=tdQsKt,
)
gmem_copy_params = SimpleNamespace(
gmem_thr_copy_dQaccum=gmem_thr_copy_dQaccum, tdQgdQaccum=tdQgdQaccum
)
load_Q_LSE = partial(
self.load_Q_LSE, gmem_tiled_copy_QK, gmem_tiled_copy_LSE,
tQgQ, tQsQ, tQcQ, t0QcQ, tQpQ,
tLSEgLSE, tLSEsLSE, tLSEcLSE, seqlen=seqlen.seqlen_q
)
load_dO_dPsum = partial(
self.load_dO_dPsum, gmem_tiled_copy_VdO, gmem_tiled_copy_LSE,
tdOgdO, tdOsdO, tdOcdO, t0dOcdO, tdOpdO,
tLSEgdPsum, tLSEsdPsum, tLSEcLSE, seqlen=seqlen.seqlen_q
)
compute_one_m_block = partial(
self.compute_one_m_block, mma_params=mma_params,
smem_copy_params=smem_copy_params, gmem_copy_params=gmem_copy_params,
load_Q_LSE=load_Q_LSE, load_dO_dPsum=load_dO_dPsum,
m_block_max=m_block_max,
softmax_scale_log2=softmax_scale_log2,
)
# ///////////////////////////////////////////////////////////////////////////////
# Prologue
# ///////////////////////////////////////////////////////////////////////////////
# Start async loads of the last mn-tile, where we take care of the mn residue
self.load_V(gmem_thr_copy_VdO, tVgV, tVsV, n_block, seqlen=seqlen.seqlen_k,
headdim=d_head_v)
if cutlass.const_expr(self.V_in_regs):
cute.arch.cp_async_commit_group()
self.load_K(gmem_thr_copy_QK, tKgK, tKsK, n_block, seqlen=seqlen.seqlen_k,
headdim=d_head)
cute.arch.cp_async_commit_group()
if cutlass.const_expr(self.V_in_regs):
cute.arch.cp_async_wait_group(1)
cute.arch.barrier()
tdPrV_copy_view = smem_thr_copy_KV.retile(tdPrV)
cute.copy(smem_thr_copy_KV, tdPsV, tdPrV_copy_view)
# Sync to avoid loading Q to smem_q, which overlaps with smem_v
cute.arch.barrier()
m_block = m_block_min
assert self.num_stages_Q >= self.num_stages_dO
for stage in cutlass.range_constexpr(self.num_stages_Q):
if cutlass.const_expr(self.num_stages_Q == 1 or stage < self.num_stages_Q - 1):
if stage == 0 or m_block + stage < m_block_max:
load_Q_LSE(m_block + stage, smem_pipe_write_q=stage)
cute.arch.cp_async_commit_group()
if cutlass.const_expr(stage < self.num_stages_dO):
if stage == 0 or m_block + stage < m_block_max:
load_dO_dPsum(m_block + stage, smem_pipe_write_q=stage)
cute.arch.cp_async_commit_group()
# ///////////////////////////////////////////////////////////////////////////////
# Mainloop
# ///////////////////////////////////////////////////////////////////////////////
# Start processing of the first n-block.
mask = AttentionMask(self.m_block_size, self.n_block_size, seqlen.seqlen_q, seqlen.seqlen_k)
mask_fn = partial(
mask.apply_mask, n_block=n_block, thr_mma=thr_mma_sdp,
mask_seqlen=True, mask_causal=self.is_causal
)
smem_pipe_read_q = cutlass.Int32(0)
smem_pipe_read_do = cutlass.Int32(0)
smem_pipe_write_q = cutlass.Int32(self.num_stages_Q - 1)
smem_pipe_write_do = cutlass.Int32(0)
for m_tile in cutlass.range(m_block_min, m_block_max, unroll=1):
compute_one_m_block(
m_tile, smem_pipe_read_q, smem_pipe_read_do, smem_pipe_write_q, smem_pipe_write_do,
mask_fn=mask_fn,
)
smem_pipe_read_q = self.advance_pipeline(smem_pipe_read_q, self.num_stages_Q)
smem_pipe_read_do = self.advance_pipeline(smem_pipe_read_do, self.num_stages_dO)
smem_pipe_write_q = self.advance_pipeline(smem_pipe_write_q, self.num_stages_Q)
smem_pipe_write_do = self.advance_pipeline(smem_pipe_write_do, self.num_stages_dO)
# ///////////////////////////////////////////////////////////////////////////////
# Epilogue
# ///////////////////////////////////////////////////////////////////////////////
# If GQA, we scale dK in the postprocessing kernel instead
if cutlass.const_expr(self.qhead_per_kvhead == 1):
acc_dK.store(acc_dK.load() * softmax_scale)
# reuse sK and sV data iterator
sdK = cute.make_tensor(sK.iterator, sK_layout)
sdV = cute.make_tensor(sV.iterator, sV_layout)
self.epilogue(
acc_dK, acc_dV, mdK, mdV, sdK, sdV,
gmem_tiled_copy_dK, gmem_tiled_copy_dV, tiled_mma_dkv,
tidx, n_block, head_idx, batch_idx, seqlen, d_head, d_head_v
)
@cute.jit
def compute_one_m_block(
self,
m_block: cutlass.Int32,
smem_pipe_read_q: cutlass.Int32,
smem_pipe_read_do: cutlass.Int32,
smem_pipe_write_q: cutlass.Int32,
smem_pipe_write_do: cutlass.Int32,
mma_params: SimpleNamespace,
smem_copy_params: SimpleNamespace,
gmem_copy_params: SimpleNamespace,
load_Q_LSE: Callable,
load_dO_dPsum: Callable,
m_block_max: cutlass.Int32,
softmax_scale_log2: cutlass.Float32,
mask_fn: Optional[Callable] = None,
):
def load_Q_next():
m_block_next = m_block + (self.num_stages_Q - 1 if cutlass.const_expr(self.num_stages_Q > 1) else 1)
if m_block_next < m_block_max:
load_Q_LSE(m_block_next, smem_pipe_write_q)
cute.arch.cp_async_commit_group()
def load_dO_next():
if m_block + self.num_stages_dO < m_block_max:
load_dO_dPsum(m_block + self.num_stages_dO, smem_pipe_write_do)
cute.arch.cp_async_commit_group()
# MMA S
acc_shape_SdP = mma_params.thr_mma_sdp.partition_shape_C(
(self.m_block_size, self.n_block_size) if cutlass.const_expr(not self.SdP_swapAB) else (self.n_block_size, self.m_block_size)
)
acc_S = cute.make_fragment(acc_shape_SdP, cutlass.Float32)
acc_S.fill(0.0)
cute.arch.cp_async_wait_group(1 if cutlass.const_expr(self.num_stages_Q > 1) else 0)
cute.arch.barrier()
sm80_utils.gemm(
mma_params.thr_mma_sdp, acc_S, mma_params.tSrQ, mma_params.tSrK,
smem_copy_params.tSsQ[None, None, None, smem_pipe_read_q if cutlass.const_expr(self.num_stages_Q > 1) else 0],
smem_copy_params.tSsK,
smem_copy_params.smem_thr_copy_QdO, smem_copy_params.smem_thr_copy_KV,
swap_AB=self.SdP_swapAB,
)
tLSErLSE = cute.make_fragment_like(smem_copy_params.tSsLSEMma[None, 0])
cute.autovec_copy(
smem_copy_params.tSsLSEMma[None, smem_pipe_read_q if cutlass.const_expr(self.num_stages_Q > 1) else 0], tLSErLSE
)
if cutlass.const_expr(mask_fn is not None):
mask_fn(acc_S, m_block=m_block)
acc_S_mn = layout_utils.reshape_acc_to_mn(acc_S)
bidx = 0
# if cute.arch.thread_idx()[0] == 0 and cute.arch.block_idx()[0] == bidx: cute.print_tensor(acc_S_mn)
# if cute.arch.thread_idx()[0] == 0 and cute.arch.block_idx()[0] == 1: cute.print_tensor(tLSErLSE)
assert cute.size(acc_S_mn, mode=[0]) == cute.size(tLSErLSE)
for r in cutlass.range(cute.size(acc_S_mn, mode=[0]), unroll_full=True):
acc_S_mn[r, None].store(cute.math.exp2(acc_S_mn[r, None].load() * softmax_scale_log2 - tLSErLSE[r], fastmath=True))
# if cute.arch.thread_idx()[0] == 0 and cute.arch.block_idx()[0] == bidx: cute.print_tensor(acc_S_mn)
# MMA dP
acc_dP = cute.make_fragment(acc_shape_SdP, cutlass.Float32)
acc_dP.fill(0.0)
cute.arch.cp_async_wait_group(1 if cutlass.const_expr(self.num_stages_dO > 1) else 0)
cute.arch.barrier()
sm80_utils.gemm(
mma_params.thr_mma_sdp, acc_dP, mma_params.tdPrdO, mma_params.tdPrV,
smem_copy_params.tdPsdO[None, None, None, smem_pipe_read_do if cutlass.const_expr(self.num_stages_dO > 1) else 0],
smem_copy_params.tdPsV,
smem_copy_params.smem_thr_copy_QdO, smem_copy_params.smem_thr_copy_KV,
hook_fn=load_Q_next if cutlass.const_expr(self.num_stages_Q > 1) else None,
swap_AB=self.SdP_swapAB,
)
tLSErdPsum = cute.make_fragment_like(smem_copy_params.tSsdPsumMma[None, 0])
cute.autovec_copy(
smem_copy_params.tSsdPsumMma[None, smem_pipe_read_do if cutlass.const_expr(self.num_stages_dO > 1) else 0], tLSErdPsum
)
acc_dP_mn = layout_utils.reshape_acc_to_mn(acc_dP)
# if cute.arch.thread_idx()[0] == 0 and cute.arch.block_idx()[0] == bidx: cute.print_tensor(acc_dP_mn)
assert cute.size(acc_dP_mn, mode=[0]) == cute.size(tLSErdPsum)
for r in cutlass.range(cute.size(acc_dP_mn, mode=[0]), unroll_full=True):
acc_dP_mn[r, None].store(acc_S_mn[r, None].load() * (acc_dP_mn[r, None].load() - tLSErdPsum[r]))
# if cute.arch.thread_idx()[0] == 0 and cute.arch.block_idx()[0] == bidx: cute.print_tensor(acc_dP_mn)
rP = cute.make_fragment_like(acc_S, self.dtype)
rP.store(acc_S.load().to(self.dtype))
if cutlass.const_expr(not self.Mma_dKV_is_RS):
tPrP = smem_copy_params.r2s_thr_copy_PdS.retile(rP) # ((Atom,AtomNum), MMA_N, MMA_N)
cute.copy(smem_copy_params.r2s_thr_copy_PdS, tPrP, smem_copy_params.tPsP)
rdS = cute.make_fragment_like(acc_dP, self.dtype)
rdS.store(acc_dP.load().to(self.dtype))
if cutlass.const_expr(not self.Mma_dKV_is_RS):
cute.arch.barrier() # Make sure P is written
# For hdim 64, It's faster to write to smem_dS first before the dV gemm
if cutlass.const_expr(not self.Mma_dKV_is_RS):
tdSrdS = smem_copy_params.r2s_thr_copy_PdS.retile(rdS)
cute.copy(smem_copy_params.r2s_thr_copy_PdS, tdSrdS, smem_copy_params.tdSsdS)
if cutlass.const_expr(self.Mma_dKV_is_RS):
tdVrP = layout_utils.reshape_acc_to_frgA(rP)
else:
tdVrP = mma_params.tdVrP
# MMA dK
sm80_utils.gemm(
mma_params.thr_mma_dkv, mma_params.acc_dV, tdVrP, mma_params.tdVrdO,
smem_copy_params.tdVsPt,
smem_copy_params.tdVsdOt[None, None, None, smem_pipe_read_do if cutlass.const_expr(self.num_stages_dO > 1) else 0],
smem_copy_params.smem_thr_copy_PdSt, smem_copy_params.smem_thr_copy_QdOt,
A_in_regs=self.Mma_dKV_is_RS,
swap_AB=self.dKV_swapAB,
)
# if cute.arch.thread_idx()[0] == 0 and cute.arch.block_idx()[0] == bidx: cute.print_tensor(mma_params.acc_dV)
cute.arch.barrier() # Make sure dS is written
# MMA dQ
def dQ_mma(hook_fn):
acc_shape_dQ = mma_params.thr_mma_dq.partition_shape_C(
(self.m_block_size, self.head_dim_padded) if cutlass.const_expr(not self.dQ_swapAB) else (self.head_dim_padded, self.m_block_size)
)
acc_dQ = cute.make_fragment(acc_shape_dQ, cutlass.Float32)
acc_dQ.fill(0.0)
sm80_utils.gemm(
mma_params.thr_mma_dq, acc_dQ, mma_params.tdQrdS, mma_params.tdQrK,
smem_copy_params.tdQsdS, smem_copy_params.tdQsKt,
smem_copy_params.smem_thr_copy_dS, smem_copy_params.smem_thr_copy_Kt,
swap_AB=self.dQ_swapAB,
hook_fn=hook_fn
)
# ((1, 1), num_elements)
acc_dQ_atomic = gmem_copy_params.gmem_thr_copy_dQaccum.retile(acc_dQ)
tdQgdQaccum_atomic = gmem_copy_params.tdQgdQaccum[None, None, m_block]
assert cute.size(acc_dQ_atomic) == cute.size(tdQgdQaccum_atomic)
for i in cutlass.range(cute.size(acc_dQ_atomic), unroll_full=True):
utils.atomic_add_fp32(acc_dQ_atomic[i], utils.elem_pointer(tdQgdQaccum_atomic, i))
# utils.atomic_add_fp32(acc_dQ[i], tdQgdQaccum_atomic.iterator + i * tdQgdQaccum_atomic.stride[1])
# if cute.arch.thread_idx()[0] == 64 and cute.arch.block_idx()[0] == bidx: cute.print_tensor(acc_dQ)
# If num_stages_Q == 1, we want to do Mma_dK first so we can start loading Q for the next iteration
if cutlass.const_expr(self.num_stages_Q > 1):
dQ_mma(load_dO_next)
# MMA dK
if cutlass.const_expr(self.Mma_dKV_is_RS):
tdVrP = layout_utils.reshape_acc_to_frgA(rdS)
else:
tdKrdS = mma_params.tdKrdS
sm80_utils.gemm(
mma_params.thr_mma_dkv, mma_params.acc_dK, tdKrdS, mma_params.tdKrQ,
smem_copy_params.tdKsdSt,
smem_copy_params.tdKsQt[None, None, None, smem_pipe_read_q if cutlass.const_expr(self.num_stages_Q > 1) else 0],
smem_copy_params.smem_thr_copy_PdSt, smem_copy_params.smem_thr_copy_QdOt,
A_in_regs=self.Mma_dKV_is_RS,
swap_AB=self.dKV_swapAB,
hook_fn=load_dO_next if cutlass.const_expr(self.num_stages_Q == 1) else None,
)
# if cute.arch.thread_idx()[0] == 0: cute.print_tensor(mma_params.acc_dK)
if cutlass.const_expr(self.num_stages_Q == 1):
cute.arch.barrier()
dQ_mma(load_Q_next)
    @cute.jit
    def epilogue(
        self,
        acc_dK: cute.Tensor,
        acc_dV: cute.Tensor,
        mdK: cute.Tensor,
        mdV: cute.Tensor,
        sdK: cute.Tensor,
        sdV: cute.Tensor,
        gmem_tiled_copy_dK: cute.TiledCopy,
        gmem_tiled_copy_dV: cute.TiledCopy,
        tiled_mma: cute.TiledMma,
        tidx: cutlass.Int32,
        n_block: cutlass.Int32,
        num_head: cutlass.Int32,
        batch_size: cutlass.Int32,
        seqlen: SeqlenInfoQK,
        d_head: cutlass.Int32,
        d_head_v: cutlass.Int32
    ):
        """Write the accumulated dK/dV for this n-block back to global memory.

        For MHA (qhead_per_kvhead == 1) the fp32 accumulators are converted to
        the model dtype, staged through smem (reusing the sK/sV buffers) for
        coalesced stores, and copied to gmem with row (seqlen) and column
        (head-dim) bounds checks.  For GQA (qhead_per_kvhead > 1) the fp32
        accumulators are instead atomically added into the dKaccum/dVaccum
        gmem buffers; per the caller's comment, dK scaling is then deferred to
        the postprocessing kernel.
        """
        rdV = cute.make_fragment_like(acc_dV, self.dtype)
        rdV.store(acc_dV.load().to(self.dtype))
        rdK = cute.make_fragment_like(acc_dK, self.dtype)
        rdK.store(acc_dK.load().to(self.dtype))
        gmem_thr_copy_dK = gmem_tiled_copy_dK.get_slice(tidx)
        gmem_thr_copy_dV = gmem_tiled_copy_dV.get_slice(tidx)
        # NOTE(review): despite its name, the `batch_size` parameter carries the
        # batch *index* (the caller passes `batch_idx` here).
        batch_idx = batch_size
        head_idx_kv = num_head // self.qhead_per_kvhead if cutlass.const_expr(not self.pack_gqa) else num_head
        if cutlass.const_expr(self.qhead_per_kvhead == 1):
            # Make sure all threads have finished reading K and V, otherwise we get racy dQ
            # because smem_q could be changed.
            cute.arch.barrier()
            # smem copy atom for dKV
            smem_copy_atom_dKV = cute.make_copy_atom(
                cute.nvgpu.CopyUniversalOp(), self.dtype, num_bits_per_copy=2 * self.dtype.width
            )
            smem_thr_copy_dKV = cute.make_tiled_copy_C(smem_copy_atom_dKV, tiled_mma).get_slice(tidx)
            taccdVrdV = smem_thr_copy_dKV.retile(rdV)
            taccdKrdK = smem_thr_copy_dKV.retile(rdK)
            taccdVsdV = smem_thr_copy_dKV.partition_D(sdV)
            taccdKsdK = smem_thr_copy_dKV.partition_D(sdK)
            # copy acc O from rmem to smem with the smem copy atom
            cute.copy(smem_copy_atom_dKV, taccdVrdV, taccdVsdV)
            cute.copy(smem_copy_atom_dKV, taccdKrdK, taccdKsdK)
            # Select the per-batch, per-KV-head slices of the output tensors;
            # for varlen, offset into the packed sequence dimension instead.
            if cutlass.const_expr(not seqlen.has_cu_seqlens_k):
                mdK_cur, mdV_cur = [t[batch_idx, None, head_idx_kv, None] for t in (mdK, mdV)]
            else:
                mdK_cur, mdV_cur = [cute.domain_offset((seqlen.offset_k, 0), t[None, head_idx_kv, None]) for t in (mdK, mdV)]
            blkdK_shape = (self.n_block_size, self.head_dim_padded)
            blkdV_shape = (self.n_block_size, self.head_dim_v_padded)
            gdK = cute.local_tile(mdK_cur, blkdK_shape, (n_block, 0))
            gdV = cute.local_tile(mdV_cur, blkdV_shape, (n_block, 0))
            tdKsdK = gmem_thr_copy_dK.partition_S(sdK)
            tdKgdK = gmem_thr_copy_dK.partition_D(gdK)
            tdVsdV = gmem_thr_copy_dV.partition_S(sdV)
            tdVgdV = gmem_thr_copy_dV.partition_D(gdV)
            tdKrdK = cute.make_fragment_like(tdKgdK, self.dtype)
            tdVrdV = cute.make_fragment_like(tdVgdV, self.dtype)
            # sync before all smem stores are done.
            cute.arch.barrier()
            # load acc dK and dV from smem to rmem for wider vectorization
            # Need to check OOB when reading from smem if kBlockN isn't evenly tiled
            # TODO
            cute.autovec_copy(tdKsdK, tdKrdK)
            cute.autovec_copy(tdVsdV, tdVrdV)
            # Identity tensors + predicates for head-dim bounds checks on the
            # gmem stores (dK and dV may have different head dims).
            cdK = cute.make_identity_tensor((self.n_block_size, self.head_dim_padded))
            tdKcdK = gmem_thr_copy_dK.partition_S(cdK)
            t0dKcdK = gmem_tiled_copy_dK.get_slice(0).partition_S(cdK)
            if cutlass.const_expr(self.head_dim_padded == self.head_dim_v_padded):
                tdVcdV = tdKcdK
                t0dVcdV = t0dKcdK
            else:
                cdV = cute.make_identity_tensor((self.n_block_size, self.head_dim_v_padded))
                tdVcdV = gmem_thr_copy_dV.partition_S(cdV)
                t0dVcdV = gmem_tiled_copy_dV.get_slice(0).partition_S(cdV)
            tdKpdK = utils.predicate_k(tdKcdK, limit=d_head)
            if cutlass.const_expr(self.same_hdim_kv):
                tdVpdV = tdKpdK
            else:
                tdVpdV = utils.predicate_k(tdVcdV, limit=d_head_v)
            # copy acc dK and acc_dV from rmem to gmem
            # The row bound uses thread-0's coordinates (compile-time known)
            # minus this thread's offset, same trick as in the load_* helpers.
            for rest_m in cutlass.range_constexpr(cute.size(tdKrdK.shape[1])):
                if t0dKcdK[0, rest_m, 0][0] < seqlen.seqlen_k - n_block * self.n_block_size - tdKcdK[0][0]:
                    cute.copy(
                        gmem_tiled_copy_dK,
                        tdKrdK[None, rest_m, None],
                        tdKgdK[None, rest_m, None],
                        pred=tdKpdK[None, rest_m, None] if cutlass.const_expr(self.check_hdim_oob) else None,
                    )
            for rest_m in cutlass.range_constexpr(cute.size(tdVrdV.shape[1])):
                if t0dVcdV[0, rest_m, 0][0] < seqlen.seqlen_k - n_block * self.n_block_size - tdVcdV[0][0]:
                    cute.copy(
                        gmem_tiled_copy_dV,
                        tdVrdV[None, rest_m, None],
                        tdVgdV[None, rest_m, None],
                        pred=tdVpdV[None, rest_m, None] if cutlass.const_expr(self.check_hdim_v_oob) else None,
                    )
        else:  # qhead_per_kvhead > 1, do atomic add
            # For Sm90, we need to sync to avoid racy writes to smem_q
            # For Sm80, we don't need to sync since we're not touching smem
            head_idx_kv = num_head // self.qhead_per_kvhead if cutlass.const_expr(not self.pack_gqa) else num_head
            if cutlass.const_expr(not seqlen.has_cu_seqlens_k):
                mdK_cur, mdV_cur = [t[batch_idx, head_idx_kv, None] for t in (mdK, mdV)]
            else:
                # assumes the accum buffers reserve one extra n-block of padding
                # per batch entry — TODO confirm against the allocation site
                padded_offset_k = seqlen.offset_k + batch_idx * self.n_block_size
                mdK_cur = cute.domain_offset((padded_offset_k * self.head_dim_padded,), mdK[head_idx_kv, None])
                mdV_cur = cute.domain_offset((padded_offset_k * self.head_dim_v_padded,), mdV[head_idx_kv, None])
            gdV = cute.local_tile(mdV_cur, (self.n_block_size * self.head_dim_v_padded,), (n_block,))
            gdK = cute.local_tile(mdK_cur, (self.n_block_size * self.head_dim_padded,), (n_block,))
            tdVgdVaccum = gmem_thr_copy_dV.partition_S(gdV)
            tdKgdKaccum = gmem_thr_copy_dK.partition_S(gdK)
            acc_dV_atomic = gmem_thr_copy_dV.retile(acc_dV)
            acc_dK_atomic = gmem_thr_copy_dK.retile(acc_dK)
            assert cute.size(acc_dV_atomic) == cute.size(tdVgdVaccum)
            assert cute.size(acc_dK_atomic) == cute.size(tdKgdKaccum)
            for i in cutlass.range(cute.size(acc_dV_atomic), unroll_full=True):
                utils.atomic_add_fp32(acc_dV_atomic[i], utils.elem_pointer(tdVgdVaccum, i))
            for i in cutlass.range(cute.size(acc_dK_atomic), unroll_full=True):
                utils.atomic_add_fp32(acc_dK_atomic[i], utils.elem_pointer(tdKgdKaccum, i))
@cute.jit
def advance_pipeline(self, pipeline_index, num_stages: cutlass.Constexpr):
return pipeline_index + 1 if pipeline_index < num_stages - 1 else 0
    @cute.jit
    def load_K(
        self,
        gmem_thr_copy: cute.TiledCopy,
        tKgK: cute.Tensor,
        tKsK: cute.Tensor,
        block: cutlass.Int32,
        seqlen: cutlass.Int32,
        headdim: cutlass.Int32,
    ):
        """Async-copy one n-block of K from gmem to smem with bounds checks.

        Rows beyond ``seqlen`` and head-dim columns beyond ``headdim`` are
        masked out via a per-element predicate so out-of-bounds gmem is never
        read.
        """
        cK = cute.make_identity_tensor((self.n_block_size, self.head_dim_padded))
        tKcK = gmem_thr_copy.partition_S(cK)
        t0KcK = gmem_thr_copy.get_slice(0).partition_S(cK)
        # Head-dim (column) predicate, shared by all rows of this tile.
        tKpK = utils.predicate_k(tKcK, limit=headdim)
        for n in cutlass.range_constexpr(cute.size(tKsK.shape[1])):
            # If kBlockN doesn't evenly divide the tiled copy, only the last `n` needs to be checked
            if self.is_even_n_smem_k or n < cute.size(tKsK.shape[1]) - 1 or tKcK[0, n, 0][0] < self.n_block_size:
                # Instead of using tKcK, we using t0KcK and subtract the offset from the limit
                # (seqlen - block * kBlockN). This is because the entries of t0KcK are known at compile time.
                predicate_n = t0KcK[0, n, 0][0] < seqlen - block * self.n_block_size - tKcK[0][0]
                predicate = cute.make_fragment_like(tKpK[None, 0, None])
                for k in cutlass.range_constexpr(cute.size(predicate.shape[1])):
                    for i in cutlass.range_constexpr(cute.size(predicate.shape[0])):
                        predicate[i, k] = (tKpK[i, n, k] if cutlass.const_expr(self.check_hdim_oob) else True) and predicate_n
                cute.copy(
                    gmem_thr_copy, tKgK[None, n, None], tKsK[None, n, None], pred=predicate,
                )
        # We need to clear the sK smem tiles since we'll use sKt for mma_dq
        # NOTE(review): no clearing is performed in this function — confirm it
        # happens elsewhere (or that the predicated copy makes it unnecessary).
@cute.jit
def load_V(
self,
gmem_thr_copy: cute.TiledCopy,
tVgV: cute.Tensor,
tVsV: cute.Tensor,
block: cutlass.Int32,
seqlen: cutlass.Int32,
headdim: cutlass.Int32,
):
cV = cute.make_identity_tensor((self.n_block_size, self.head_dim_v_padded))
tVcV = gmem_thr_copy.partition_S(cV)
t0VcV = gmem_thr_copy.get_slice(0).partition_S(cV)
tVpV = utils.predicate_k(tVcV, limit=headdim)
for n in cutlass.range_constexpr(cute.size(tVsV.shape[1])):
# If kBlockN doesn't evenly divide the tiled copy, only the last `n` needs to be checked
if self.is_even_n_smem_v or n < cute.size(tVsV.shape[1]) - 1 or tVcV[0, n, 0][0] < self.n_block_size:
# Instead of using tVcV, we using t0VcV and subtract the offset from the limit
# (seqlen - block * kBlockN). This is because the entries of t0VcV are known at compile time.
predicate_n = t0VcV[0, n, 0][0] < seqlen - block * self.n_block_size - tVcV[0][0]
predicate = cute.make_fragment_like(tVpV[None, 0, None])
for k in cutlass.range_constexpr(cute.size(predicate.shape[1])):
for i in cutlass.range_constexpr(cute.size(predicate.shape[0])):
predicate[i, k] = (tVpV[i, n, k] if cutlass.const_expr(self.check_hdim_oob) else True) and predicate_n
cute.copy(
gmem_thr_copy, tVgV[None, n, None], tVsV[None, n, None], pred=predicate,
)
@cute.jit
def load_Q_LSE(
self,
gmem_tiled_copy_Q: cute.TiledCopy,
gmem_tiled_copy_LSE: cute.TiledCopy,
tQgQ: cute.Tensor,
tQsQ: cute.Tensor,
tQcQ: cute.Tensor,
t0QcQ: cute.Tensor,
tQpQ: cute.Tensor,
tLSEgLSE: cute.Tensor,
tLSEsLSE: cute.Tensor,
tLSEcLSE: cute.Tensor,
block: cutlass.Int32,
smem_pipe_write_q: cutlass.Int32,
seqlen: cutlass.Int32,
):
for m in cutlass.range_constexpr(cute.size(tQsQ.shape[1])):
# If kBlockM doesn't evenly divide the tiled copy, only the last `m` needs to be checked
if self.is_even_m_smem_q or m < cute.size(tQsQ.shape[1]) - 1 or tQcQ[0, m, 0][0] < self.m_block_size:
# Instead of using tQcQ, we using t0QcQ and subtract the offset from the limit
# (seqlen - block * kBlockM). This is because the entries of t0QcQ are known at compile time.
predicate_m = t0QcQ[0, m, 0][0] < seqlen - block * self.m_block_size - tQcQ[0][0]
predicate = cute.make_fragment_like(tQpQ[None, 0, None])
for k in cutlass.range_constexpr(cute.size(predicate.shape[1])):
for i in cutlass.range_constexpr(cute.size(predicate.shape[0])):
predicate[i, k] = (tQpQ[i, m, k] if cutlass.const_expr(self.check_hdim_oob) else True) and predicate_m
cute.copy(
gmem_tiled_copy_Q,
tQgQ[None, m, None, block],
tQsQ[None, m, None, smem_pipe_write_q if cutlass.const_expr(self.num_stages_Q) > 1 else 0],
pred=predicate,
)
# We need to clear the sQ smem tiles since we'll use sQt for mma_dK
# We made sure LSE length is padded so we read `kBlockM` elements so that all
# elements in sLSE are filled. Without this we might have uninitialized sLSE values.
for m in cutlass.range_constexpr(cute.size(tLSEsLSE.shape[1])):
if tLSEcLSE[0, m][0] < self.m_block_size:
cute.copy(
gmem_tiled_copy_LSE,
tLSEgLSE[None, m, block],
tLSEsLSE[None, m, smem_pipe_write_q if cutlass.const_expr(self.num_stages_Q > 1) else 0],
)
@cute.jit
def load_dO_dPsum(
self,
gmem_tiled_copy_dO: cute.TiledCopy,
gmem_tiled_copy_dPsum: cute.TiledCopy,
tdOgdO: cute.Tensor,
tdOsdO: cute.Tensor,
tdOcdO: cute.Tensor,
t0dOcdO: cute.Tensor,
tdOpdO: cute.Tensor,
tdPsumgdPsum: cute.Tensor,
tdPsumsdPsum: cute.Tensor,
tdPsumcdPsum: cute.Tensor,
block: cutlass.Int32,
smem_pipe_write_q: cutlass.Int32,
seqlen: cutlass.Int32,
):
for m in cutlass.range_constexpr(cute.size(tdOsdO.shape[1])):
# If kBlockM doesn't evenly divide the tiled copy, only the last `m` needs to be checked
if self.is_even_m_smem_do or m < cute.size(tdOsdO.shape[1]) - 1 or tdOcdO[0, m, 0][0] < self.m_block_size:
# Instead of using tdOcdO, we using t0dOcdO and subtract the offset from the limit
# (seqlen - block * kBlockM). This is because the entries of t0dOcdO are known at compile time.
predicate_m = t0dOcdO[0, m, 0][0] < seqlen - block * self.m_block_size - tdOcdO[0][0]
predicate = cute.make_fragment_like(tdOpdO[None, 0, None])
for k in cutlass.range_constexpr(cute.size(predicate.shape[1])):
for i in cutlass.range_constexpr(cute.size(predicate.shape[0])):
predicate[i, k] = (tdOpdO[i, m, k] if cutlass.const_expr(self.check_hdim_oob) else True) and predicate_m
cute.copy(
gmem_tiled_copy_dO,
tdOgdO[None, m, None, block],
tdOsdO[None, m, None, smem_pipe_write_q if cutlass.const_expr(self.num_stages_dO > 1) else 0],
pred=predicate,
)
# We need to clear the sQ smem tiles since we'll use sQt for mma_dK
# We made sure LSE length is padded so we read `kBlockM` elements so that all
# elements in sLSE are filled. Without this we might have uninitialized sLSE values.
for m in cutlass.range_constexpr(cute.size(tdPsumgdPsum.shape[1])):
if tdPsumcdPsum[0, m][0] < self.m_block_size:
cute.copy(
gmem_tiled_copy_dPsum,
tdPsumgdPsum[None, m, block],
tdPsumsdPsum[None, m, smem_pipe_write_q if cutlass.const_expr(self.num_stages_dO > 1) else 0],
)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_bwd.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/flash_bwd_postprocess.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
# A reimplementation of https://github.com/Dao-AILab/flash-attention/blob/main/hopper/flash_bwd_postprocess_kernel.h
# from Cutlass C++ to Cute-DSL.
import math
from typing import Callable, Optional, Type, Literal
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
import cutlass.utils.hopper_helpers as sm90_utils_basic
import cutlass.utils.blackwell_helpers as sm100_utils_basic
from cutlass.cute.nvgpu import cpasync, warp, warpgroup
from cutlass import Float32, const_expr
from cutlass.utils import LayoutEnum
from quack import copy_utils
from quack import layout_utils
from quack import sm90_utils
from flash_attn.cute import utils
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
from flash_attn.cute import ampere_helpers as sm80_utils
from flash_attn.cute.seqlen_info import SeqlenInfoQK
import cutlass.cute.nvgpu.tcgen05 as tcgen05
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.tile_scheduler import (
SingleTileScheduler,
SingleTileVarlenScheduler,
TileSchedulerArguments,
)
class FlashAttentionBackwardPostprocess:
def __init__(
self,
dtype: Type[cutlass.Numeric],
head_dim: int,
arch: Literal[80, 90, 100],
tile_m: int = 128,
num_threads: int = 256,
AtomLayoutMdQ: int = 1,
dQ_swapAB: bool = False,
use_2cta_instrs: bool = False,
cluster_size: int = 1, # for varlen offsets
):
"""
:param head_dim: head dimension
:type head_dim: int
:param tile_m: m block size
:type tile_m: int
"""
self.dtype = dtype
self.tile_m = tile_m
assert arch // 10 in [8, 9, 10, 11], (
"Only Ampere (8.x), Hopper (9.x), and Blackwell (10.x, 11.x) are supported"
)
self.arch = arch
# padding head_dim to a multiple of 32 as k_block_size
hdim_multiple_of = 32
self.tile_hdim = int(math.ceil(head_dim / hdim_multiple_of) * hdim_multiple_of)
self.check_hdim_oob = head_dim != self.tile_hdim
self.num_threads = num_threads
self.AtomLayoutMdQ = AtomLayoutMdQ
self.dQ_swapAB = dQ_swapAB
self.use_2cta_instrs = use_2cta_instrs and arch // 10 == 10 and head_dim != 64
self.cluster_size = cluster_size
@staticmethod
def can_implement(dtype, head_dim, tile_m, num_threads) -> bool:
"""Check if the kernel can be implemented with the given parameters.
:param dtype: data type
:type dtype: cutlass.Numeric
:param head_dim: head dimension
:type head_dim: int
:param tile_m: m block size
:type tile_m: int
:return: True if the kernel can be implemented, False otherwise
:rtype: bool
"""
if dtype not in [cutlass.Float16, cutlass.BFloat16]:
return False
if head_dim % 8 != 0:
return False
if num_threads % 32 != 0:
return False
return True
    def _get_tiled_mma(self):
        """Build the tiled MMA for the dQ gemm, dispatched on architecture.

        Only the accumulator partitioning of this MMA matters downstream (the
        Hopper branch says so explicitly); the atom layout is split between M
        and N according to ``AtomLayoutMdQ``, swapped when ``dQ_swapAB``.
        """
        if const_expr(self.arch == 80):
            # Ampere: 16x8x16 fp16/bf16 warp MMA, warps tiled over (M, N).
            num_mma_warps = self.num_threads // 32
            atom_layout_dQ = (
                (self.AtomLayoutMdQ, num_mma_warps // self.AtomLayoutMdQ, 1)
                if const_expr(not self.dQ_swapAB)
                else (num_mma_warps // self.AtomLayoutMdQ, self.AtomLayoutMdQ, 1)
            )
            tiled_mma = cute.make_tiled_mma(
                warp.MmaF16BF16Op(self.dtype, Float32, (16, 8, 16)),
                atom_layout_dQ,
                permutation_mnk=(atom_layout_dQ[0] * 16, atom_layout_dQ[1] * 16, 16),
            )
        elif const_expr(self.arch == 90):
            # Hopper: warpgroup MMA split across the available warp groups.
            num_mma_warp_groups = self.num_threads // 128
            atom_layout_dQ = (self.AtomLayoutMdQ, num_mma_warp_groups // self.AtomLayoutMdQ)
            tiler_mn_dQ = (self.tile_m // atom_layout_dQ[0], self.tile_hdim // atom_layout_dQ[1])
            tiled_mma = sm90_utils_basic.make_trivial_tiled_mma(
                self.dtype,
                self.dtype,
                warpgroup.OperandMajorMode.K,  # These don't matter, we only care about the accum
                warpgroup.OperandMajorMode.K,
                Float32,
                atom_layout_mnk=(atom_layout_dQ if not self.dQ_swapAB else atom_layout_dQ[::-1])
                + (1,),
                tiler_mn=tiler_mn_dQ if not self.dQ_swapAB else tiler_mn_dQ[::-1],
            )
        else:
            # Blackwell: tcgen05 MMA over the full (tile_m, tile_hdim) tile.
            cta_group = tcgen05.CtaGroup.ONE
            tiled_mma = sm100_utils_basic.make_trivial_tiled_mma(
                self.dtype,
                tcgen05.OperandMajorMode.MN,  # dS_major_mode
                tcgen05.OperandMajorMode.MN,  # Kt_major_mode
                Float32,
                cta_group,
                (self.tile_m, self.tile_hdim),
            )
        if const_expr(self.arch in [80, 90]):
            assert self.num_threads == tiled_mma.size
        return tiled_mma
    def _setup_attributes(self):
        """Derive the copy atoms and smem layouts used by the kernel.

        Sets, per architecture:
          - ``g2s_tiled_copy_dQaccum``: cp.async gmem->smem copy of fp32 dQaccum,
          - ``s2r_tiled_copy_dQaccum`` / ``sdQaccum_layout``: smem->rmem copy
            and the matching smem layout for the accumulator,
          - ``gmem_tiled_copy_dQ``: rmem/smem->gmem copy of the converted dQ,
          - ``sdQ_layout``: smem staging layout for the dtype dQ tile.
        """
        # ///////////////////////////////////////////////////////////////////////////////
        # GMEM Tiled copy:
        # ///////////////////////////////////////////////////////////////////////////////
        # Thread layouts for copies
        universal_copy_bits = 128
        async_copy_elems_accum = universal_copy_bits // Float32.width
        atom_async_copy_accum = cute.make_copy_atom(
            cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.GLOBAL),
            Float32,
            num_bits_per_copy=universal_copy_bits,
        )
        # We don't do bound checking for the gmem -> smem load so we just assert here.
        assert (self.tile_m * self.tile_hdim // async_copy_elems_accum) % self.num_threads == 0
        self.g2s_tiled_copy_dQaccum = cute.make_tiled_copy_tv(
            atom_async_copy_accum,
            cute.make_layout(self.num_threads),
            cute.make_layout(async_copy_elems_accum),
        )
        num_s2r_copy_elems = 1 if const_expr(self.arch == 80) else 4
        if const_expr(self.arch == 80):
            # Ampere: flat 1D smem accumulator, element-wise smem->rmem copy.
            self.s2r_tiled_copy_dQaccum = copy_utils.tiled_copy_1d(
                Float32, self.num_threads, num_s2r_copy_elems
            )
            self.sdQaccum_layout = cute.make_layout(self.tile_m * self.tile_hdim)
        elif const_expr(self.arch == 90):
            # Hopper: split the accumulator across warp groups, 128-bit copies.
            num_threads_per_warp_group = 128
            num_mma_warp_groups = self.num_threads // 128
            self.s2r_tiled_copy_dQaccum = cute.make_tiled_copy_tv(
                cute.make_copy_atom(cute.nvgpu.CopyUniversalOp(), Float32, num_bits_per_copy=128),
                cute.make_layout((num_threads_per_warp_group, num_mma_warp_groups)),  # thr_layout
                cute.make_layout(128 // Float32.width),  # val_layout
            )
            self.sdQaccum_layout = cute.make_layout(
                (self.tile_m * self.tile_hdim // num_mma_warp_groups, num_mma_warp_groups)
            )
        else:
            # Blackwell: reduce the accumulator in stages of dQ_reduce_ncol columns.
            self.dQ_reduce_ncol = 32
            dQaccum_reduce_stage = self.tile_hdim // self.dQ_reduce_ncol
            assert self.num_threads == 128  # TODO: currently hard-coded
            self.s2r_tiled_copy_dQaccum = copy_utils.tiled_copy_1d(
                Float32, self.num_threads, num_s2r_copy_elems
            )
            self.sdQaccum_layout = cute.make_layout(
                (self.tile_m * self.tile_hdim // dQaccum_reduce_stage, dQaccum_reduce_stage)
            )
        # 128-bit stores of the converted dQ to gmem.
        num_copy_elems = 128 // self.dtype.width
        # presumably caps the threads per row at one 128-elem cache line — TODO confirm
        threads_per_row = math.gcd(128, self.tile_hdim) // num_copy_elems
        self.gmem_tiled_copy_dQ = copy_utils.tiled_copy_2d(
            self.dtype, threads_per_row, self.num_threads, num_copy_elems
        )
        # ///////////////////////////////////////////////////////////////////////////////
        # Shared memory layout: dQ
        # ///////////////////////////////////////////////////////////////////////////////
        # We can't just use kHeadDim here. E.g. if MMA shape is 64 x 96 but split across 2 WGs,
        # then setting kBlockKSmem to 32 will cause "Static shape_div failure".
        # We want to treat it as 64 x 48, so kBlockKSmem should be 16.
        mma_shape_n = self.tiled_mma.get_tile_size(1)
        if const_expr(self.arch == 80):
            sdQ_layout_atom = sm80_utils.get_smem_layout_atom(self.dtype, mma_shape_n)
            self.sdQ_layout = cute.tile_to_shape(
                sdQ_layout_atom, (self.tile_m, self.tile_hdim), (0, 1)
            )
        elif const_expr(self.arch == 90):
            self.sdQ_layout = sm90_utils.make_smem_layout(
                self.dtype, LayoutEnum.ROW_MAJOR, (self.tile_m, self.tile_hdim)
            )
        else:
            # TODO: this is hard-coded for hdim 128
            self.sdQ_layout = sm100_utils_basic.make_smem_layout_epi(
                self.dtype, LayoutEnum.ROW_MAJOR, (self.tile_m, self.tile_hdim), 1
            )
    @cute.jit
    def __call__(
        self,
        mdQaccum: cute.Tensor,
        mdQ: cute.Tensor,
        scale: cutlass.Float32,
        mCuSeqlensQ: Optional[cute.Tensor],
        mSeqUsedQ: Optional[cute.Tensor],
        stream: cuda.CUstream,
    ):
        """Validate inputs, build layouts/schedulers, and launch the kernel
        that converts fp32 dQaccum into dQ in the model dtype.

        One CTA is launched per (m_block, head, batch) tile; varlen batches
        (``mCuSeqlensQ`` given) use the varlen tile scheduler.
        """
        # Get the data type and check if it is fp16 or bf16
        if const_expr(mdQ.element_type not in [cutlass.Float16, cutlass.BFloat16]):
            raise TypeError("Only Float16 or BFloat16 is supported")
        if const_expr(mdQaccum is not None):
            if const_expr(mdQaccum.element_type not in [cutlass.Float32]):
                raise TypeError("dQaccum tensor must be Float32")
        # NOTE(review): mdQaccum is typed Optional but is unconditionally passed
        # to assume_tensor_aligned here — confirm callers never pass None.
        mdQaccum, mdQ = [assume_tensor_aligned(t) for t in (mdQaccum, mdQ)]
        self.tiled_mma = self._get_tiled_mma()
        self._setup_attributes()
        # smem is reused for the fp32 accumulator and the dtype dQ staging tile.
        smem_size = max(
            cute.size_in_bytes(cutlass.Float32, self.sdQaccum_layout),
            cute.size_in_bytes(self.dtype, self.sdQ_layout),
        )
        # Varlen: mdQ is (total_q, num_head, headdim); otherwise
        # (batch, seqlen_q, num_head, headdim).
        if const_expr(mCuSeqlensQ is not None):
            TileScheduler = SingleTileVarlenScheduler
            num_head = mdQ.shape[1]
            num_batch = mCuSeqlensQ.shape[0] - 1
            num_block = cute.ceil_div(mdQ.shape[0], self.tile_m)
        else:
            TileScheduler = SingleTileScheduler
            num_head = mdQ.shape[2]
            num_batch = mdQ.shape[0]
            num_block = cute.ceil_div(mdQ.shape[1], self.tile_m)
        tile_sched_args = TileSchedulerArguments(
            num_block=num_block,
            num_head=num_head,
            num_batch=num_batch,
            num_splits=1,
            seqlen_k=0,
            # NOTE(review): shape[2]/shape[0] match the varlen layout only; in the
            # non-varlen layout they are num_head/batch — presumably the scheduler
            # only reads these fields in the varlen case. TODO confirm.
            headdim=mdQ.shape[2],
            headdim_v=0,
            total_q=mdQ.shape[0],
            tile_shape_mn=(self.tile_m, 1),
            mCuSeqlensQ=mCuSeqlensQ,
            mSeqUsedQ=mSeqUsedQ,
        )
        tile_sched_params = TileScheduler.to_underlying_arguments(tile_sched_args)
        grid_dim = TileScheduler.get_grid_shape(tile_sched_params)
        # grid_dim: (m_block, num_head, batch_size)
        self.kernel(
            mdQaccum,
            mdQ,
            mCuSeqlensQ,
            mSeqUsedQ,
            scale,
            self.tiled_mma,
            self.dQ_swapAB,
            self.sdQaccum_layout,
            self.sdQ_layout,
            self.g2s_tiled_copy_dQaccum,
            self.s2r_tiled_copy_dQaccum,
            self.gmem_tiled_copy_dQ,
            tile_sched_params,
            TileScheduler,
        ).launch(
            grid=grid_dim,
            block=[self.num_threads, 1, 1],
            smem=smem_size,
            stream=stream,
        )
@cute.kernel
def kernel(
    self,
    mdQaccum: cute.Tensor,
    mdQ: cute.Tensor,
    mCuSeqlensQ: Optional[cute.Tensor],
    mSeqUsedQ: Optional[cute.Tensor],
    scale: cutlass.Float32,
    tiled_mma: cute.TiledMma,
    dQ_swapAB: cutlass.Constexpr,
    sdQaccum_layout: cute.Layout,
    sdQ_layout: cute.ComposedLayout,
    g2s_tiled_copy_dQaccum: cute.TiledCopy,
    s2r_tiled_copy_dQaccum: cute.TiledCopy,
    gmem_tiled_copy_dQ: cute.TiledCopy,
    tile_sched_params: ParamsBase,
    TileScheduler: cutlass.Constexpr[Callable],
):
    """Per-CTA dQ postprocess: load one fp32 dQaccum tile, scale, convert to
    fp16/bf16, and write the final dQ tile to gmem.

    Default path (steps 1-5): gmem -> smem (cp.async) -> registers
    (scale + convert) -> smem (coalesced layout) -> gmem.
    The sm100 2-CTA path instead stages the accumulator through smem in
    row-group chunks so each thread's registers end up matching the TMEM
    fragment layout, then performs one register -> smem copy before the
    shared smem -> gmem write-out.
    """
    # ///////////////////////////////////////////////////////////////////////////////
    # Get shared memory buffer
    # ///////////////////////////////////////////////////////////////////////////////
    smem = cutlass.utils.SmemAllocator()
    sdQaccum = smem.allocate_tensor(cutlass.Float32, sdQaccum_layout, byte_alignment=1024)
    # 1-D view of the same buffer for the flat g2s accumulator copy.
    sdQaccum_flat = cute.make_tensor(sdQaccum.iterator, cute.make_layout(cute.size(sdQaccum)))
    if const_expr(self.arch in [80, 90]):
        # Reuse the accumulator smem (recast to the output dtype) for the dQ tile.
        sdQ = cute.make_tensor(cute.recast_ptr(sdQaccum.iterator, dtype=self.dtype), sdQ_layout)
    else:
        # extra stage dimension
        sdQ = cute.make_tensor(
            cute.recast_ptr(sdQaccum.iterator, sdQ_layout.inner, dtype=self.dtype),
            sdQ_layout.outer,
        )[None, None, 0]
    # Transposed view used when the MMA layout has dQ's M/N swapped.
    sdQt = layout_utils.transpose_view(sdQ)
    # Thread index, block index
    tidx, _, _ = cute.arch.thread_idx()
    tile_scheduler = TileScheduler.create(tile_sched_params)
    work_tile = tile_scheduler.initial_work_tile_info()
    m_block, head_idx, batch_idx, _ = work_tile.tile_idx
    if work_tile.is_valid_tile:
        # ///////////////////////////////////////////////////////////////////////////////
        # Get the appropriate tiles for this thread block.
        # ///////////////////////////////////////////////////////////////////////////////
        seqlen = SeqlenInfoQK.create(
            batch_idx,
            mdQ.shape[1],
            0,
            mCuSeqlensQ=mCuSeqlensQ,
            mCuSeqlensK=None,
            mSeqUsedQ=mSeqUsedQ,
            mSeqUsedK=None,
            tile_m=self.tile_m * self.cluster_size,
        )
        if const_expr(not seqlen.has_cu_seqlens_q):
            # Fixed-length layout: slice out this (batch, head)'s views.
            mdQ_cur = mdQ[batch_idx, None, head_idx, None]
            mdQaccum_cur = mdQaccum[batch_idx, head_idx, None]
            head_dim = mdQ.shape[3]
        else:
            # Varlen layout: offset into the packed total_q dimension.
            if cutlass.const_expr(self.arch >= 90):
                padded_offset_q = seqlen.padded_offset_q
            else:
                padded_offset_q = seqlen.offset_q + batch_idx * self.tile_m
            mdQ_cur = cute.domain_offset((seqlen.offset_q, 0), mdQ[None, head_idx, None])
            mdQaccum_cur = cute.domain_offset(
                (padded_offset_q * self.tile_hdim,), mdQaccum[head_idx, None]
            )
            head_dim = mdQ.shape[2]
            # HACK: Compiler doesn't seem to recognize that padding
            # by padded_offset_q * self.tile_hdim keeps alignment
            # since statically divisible by 4
            mdQaccum_cur_ptr = cute.make_ptr(
                dtype=mdQaccum_cur.element_type,
                value=mdQaccum_cur.iterator.toint(),
                mem_space=mdQaccum_cur.iterator.memspace,
                assumed_align=mdQaccum.iterator.alignment,
            )
            mdQaccum_cur = cute.make_tensor(mdQaccum_cur_ptr, mdQaccum_cur.layout)
        # dQaccum is stored flat: one contiguous tile_m * tile_hdim chunk per m-block.
        gdQaccum = cute.local_tile(mdQaccum_cur, (self.tile_m * self.tile_hdim,), (m_block,))
        gdQ = cute.local_tile(mdQ_cur, (self.tile_m, self.tile_hdim), (m_block, 0))
        seqlen_q = seqlen.seqlen_q
        seqlen_q_rounded = cute.round_up(seqlen_q, self.tile_m)
        if const_expr(self.arch // 10 == 10 and self.use_2cta_instrs):
            # 2-CTA: remap dQaccum layout into TMEM view before writing sdQ
            num_reduce_threads = self.num_threads
            thr_mma_dsk = tiled_mma.get_slice(tidx)
            dQacc_shape = thr_mma_dsk.partition_shape_C((self.tile_m, self.tile_hdim))
            tdQtdQ = thr_mma_dsk.make_fragment_C(dQacc_shape)
            tdQtdQ = cute.make_tensor(tdQtdQ.iterator, tdQtdQ.layout)
            tmem_load_atom = cute.make_copy_atom(
                tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(self.dQ_reduce_ncol)), Float32
            )
            tiled_tmem_ld = tcgen05.make_tmem_copy(tmem_load_atom, tdQtdQ)
            thr_tmem_ld = tiled_tmem_ld.get_slice(tidx)
            # Identity tensor gives each accumulator element its (row, col) coordinate.
            cdQ = cute.make_identity_tensor((self.tile_m, self.tile_hdim))
            tdQcdQ = thr_mma_dsk.partition_C(cdQ)
            tdQcdQ_tensor = cute.make_tensor(tdQcdQ.iterator, tdQcdQ.layout)
            tdQrdQ = thr_tmem_ld.partition_D(tdQcdQ_tensor)
            tiled_copy_accum = s2r_tiled_copy_dQaccum
            g2s_thr_copy = tiled_copy_accum.get_slice(tidx)
            # S -> R
            tdQrdQ_fp32 = cute.make_fragment(tdQrdQ.shape, cutlass.Float32)
            tdQrdQ_s2r = cute.make_tensor(tdQrdQ_fp32.iterator, tdQrdQ_fp32.shape)
            smem_copy_atom = sm100_utils_basic.get_smem_store_op(
                LayoutEnum.ROW_MAJOR, self.dtype, cutlass.Float32, tiled_tmem_ld
            )
            r2s_tiled_copy = cute.make_tiled_copy(
                smem_copy_atom,
                layout_tv=tiled_tmem_ld.layout_dst_tv_tiled,
                tiler_mn=tiled_tmem_ld.tiler_mn,
            )
            tdQsdQ_r2s = thr_tmem_ld.partition_D(thr_mma_dsk.partition_C(sdQ))
            tdQrdQ_r2s = cute.make_fragment(tdQsdQ_r2s.shape, self.dtype)
            # Process the accumulator in `num_stages` column chunks of
            # dQ_reduce_ncol each, split across `row_groups` thread groups.
            num_stages = cute.size(tdQrdQ_fp32, mode=[1])
            stage_stride = self.dQ_reduce_ncol
            row_groups = 2
            assert num_stages % row_groups == 0
            assert num_reduce_threads % row_groups == 0
            stage_groups = num_stages // row_groups
            threads_per_row_group = num_reduce_threads // row_groups
            stage_loads = tuple((row_group, row_group) for row_group in range(row_groups))
            stage_iters = tuple(
                (row_group, row_group * threads_per_row_group)
                for row_group in range(row_groups)
            )
            s2r_lane = tidx % threads_per_row_group
            s2r_buf = tidx // threads_per_row_group
            gdQaccum_layout_g2s = cute.make_layout(
                shape=(self.tile_m * self.dQ_reduce_ncol, 1), stride=(1, 0)
            )
            sdQaccum_g2s = g2s_thr_copy.partition_D(sdQaccum)
            # G -> S
            for stage_group in cutlass.range_constexpr(stage_groups):
                # Each row group loads its stage chunk into its own smem buffer.
                for stage_offset, smem_buf in stage_loads:
                    stage_idx = stage_group + stage_offset * stage_groups
                    gdQaccum_stage = cute.local_tile(
                        gdQaccum,
                        (self.tile_m * self.dQ_reduce_ncol,),
                        (stage_idx,),
                    )
                    gdQaccum_stage_g2s = cute.make_tensor(
                        gdQaccum_stage.iterator,
                        gdQaccum_layout_g2s,
                    )
                    tdQgdQ = g2s_thr_copy.partition_S(gdQaccum_stage_g2s)
                    cute.copy(
                        g2s_thr_copy,
                        tdQgdQ[None, None, 0],
                        sdQaccum_g2s[None, None, smem_buf],
                    )
                # Make the smem loads visible to all reduce threads before S -> R.
                cute.arch.fence_view_async_shared()
                cute.arch.barrier(barrier_id=6, number_of_threads=num_reduce_threads)
                # S -> R
                for stage_offset, lane_offset in stage_iters:
                    stage_idx = stage_group + stage_offset * stage_groups
                    # Read with a remapped thread id so the data lands in the
                    # register slots expected by the TMEM fragment layout.
                    s2r_src_tidx = s2r_lane + lane_offset
                    s2r_thr_copy = tiled_copy_accum.get_slice(s2r_src_tidx)
                    sdQaccum_src = s2r_thr_copy.partition_S(sdQaccum)[None, None, s2r_buf]
                    tdQrdQ_s2r_cpy = tdQrdQ_s2r[None, stage_idx, None, None]
                    tdQrdQ_r2s_cpy = cute.make_tensor(
                        tdQrdQ_s2r_cpy.iterator, cute.make_layout(sdQaccum_src.shape)
                    )
                    cute.copy(s2r_thr_copy, sdQaccum_src, tdQrdQ_r2s_cpy)
                    cute.arch.fence_view_async_shared()
                    cute.arch.barrier(barrier_id=7, number_of_threads=num_reduce_threads)
                    # R -> S
                    stage_lo = stage_idx % stage_stride
                    stage_hi = stage_idx // stage_stride
                    tdQrdQ_r2s_cpy = cute.make_tensor(
                        cute.recast_ptr(tdQrdQ_r2s_cpy.iterator),
                        tdQrdQ_r2s[((None, 0), (stage_lo, stage_hi), 0, 0)].shape,
                    )
                    # Scale and narrow to the output dtype in registers.
                    dQ_vec = tdQrdQ_r2s_cpy.load() * scale
                    tdQrdQ_r2s[((None, 0), (stage_lo, stage_hi), 0, 0)].store(
                        dQ_vec.to(self.dtype)
                    )
            # R -> S
            cute.copy(
                r2s_tiled_copy,
                tdQrdQ_r2s[None, None, None, 0],
                tdQsdQ_r2s[None, None, None, 0],
            )
            cute.arch.fence_view_async_shared()
            cute.arch.barrier(barrier_id=8, number_of_threads=num_reduce_threads)
        else:
            # Step 1: load dQaccum from gmem to smem
            g2s_thr_copy_dQaccum = g2s_tiled_copy_dQaccum.get_slice(tidx)
            tdQgdQaccum = g2s_thr_copy_dQaccum.partition_S(gdQaccum)
            tdQsdQaccumg2s = g2s_thr_copy_dQaccum.partition_D(sdQaccum_flat)
            cute.copy(g2s_tiled_copy_dQaccum, tdQgdQaccum, tdQsdQaccumg2s)
            cute.arch.cp_async_commit_group()
            cute.arch.cp_async_wait_group(0)
            cute.arch.barrier()
            # Step 2: load dQ from smem to rmem
            s2r_thr_copy_dQaccum = s2r_tiled_copy_dQaccum.get_slice(tidx)
            tdQsdQaccum = s2r_thr_copy_dQaccum.partition_S(sdQaccum)
            tile_shape = (self.tile_m, self.tile_hdim)
            acc = None
            tiled_copy_t2r = None
            if const_expr(self.arch in [80, 90]):
                acc_shape = tiled_mma.partition_shape_C(
                    tile_shape if const_expr(not dQ_swapAB) else tile_shape[::-1]
                )
                acc = cute.make_fragment(acc_shape, cutlass.Float32)
                assert cute.size(acc) == cute.size(tdQsdQaccum)
            else:
                # sm100: size the register fragment to match the TMEM-load partitioning.
                thr_mma = tiled_mma.get_slice(0)  # 1-CTA
                dQacc_shape = tiled_mma.partition_shape_C((self.tile_m, self.tile_hdim))
                tdQtdQ = tiled_mma.make_fragment_C(dQacc_shape)
                tdQcdQ = thr_mma.partition_C(
                    cute.make_identity_tensor((self.tile_m, self.tile_hdim))
                )
                tmem_load_atom = cute.make_copy_atom(
                    tcgen05.copy.Ld32x32bOp(tcgen05.copy.Repetition(self.dQ_reduce_ncol)),
                    Float32,
                )
                tiled_copy_t2r = tcgen05.make_tmem_copy(tmem_load_atom, tdQtdQ)
                thr_copy_t2r = tiled_copy_t2r.get_slice(tidx)
                tdQrdQ_t2r_shape = thr_copy_t2r.partition_D(tdQcdQ).shape
                acc = cute.make_fragment(tdQrdQ_t2r_shape, Float32)
            tdQrdQaccum = cute.make_tensor(acc.iterator, cute.make_layout(tdQsdQaccum.shape))
            cute.autovec_copy(tdQsdQaccum, tdQrdQaccum)
            # Convert tdQrdQaccum from fp32 to fp16/bf16
            rdQ = cute.make_fragment_like(acc, self.dtype)
            rdQ.store((acc.load() * scale).to(self.dtype))
            # Step 3: Copy dQ from register to smem
            cute.arch.barrier()  # make sure all threads have finished loading dQaccum
            if const_expr(self.arch in [80, 90]):
                copy_atom_r2s_dQ = utils.get_smem_store_atom(
                    self.arch, self.dtype, transpose=self.dQ_swapAB
                )
                tiled_copy_r2s_dQ = cute.make_tiled_copy_C(copy_atom_r2s_dQ, tiled_mma)
            else:
                # copy_atom_r2s_dQ = sm100_utils_basic.get_smem_store_op(
                #     LayoutEnum.ROW_MAJOR, self.dtype, Float32, tiled_copy_t2r,
                # )
                # tiled_copy_r2s_dQ = cute.make_tiled_copy_D(copy_atom_r2s_dQ, tiled_copy_t2r)
                thr_layout_r2s_dQ = cute.make_layout((self.num_threads, 1))  # 128 threads
                val_layout_r2s_dQ = cute.make_layout((1, 128 // self.dtype.width))
                copy_atom_r2s_dQ = cute.make_copy_atom(
                    cute.nvgpu.CopyUniversalOp(),
                    self.dtype,
                    num_bits_per_copy=128,
                )
                tiled_copy_r2s_dQ = cute.make_tiled_copy_tv(
                    copy_atom_r2s_dQ, thr_layout_r2s_dQ, val_layout_r2s_dQ
                )
            thr_copy_r2s_dQ = tiled_copy_r2s_dQ.get_slice(tidx)
            cdQ = cute.make_identity_tensor((self.tile_m, self.tile_hdim))
            if const_expr(self.arch in [80, 90]):
                taccdQrdQ = thr_copy_r2s_dQ.retile(rdQ)
            else:
                taccdQcdQ_shape = thr_copy_r2s_dQ.partition_S(cdQ).shape
                taccdQrdQ = cute.make_tensor(rdQ.iterator, taccdQcdQ_shape)
            taccdQsdQ = thr_copy_r2s_dQ.partition_D(
                sdQ if const_expr(not self.dQ_swapAB) else sdQt
            )
            cute.copy(thr_copy_r2s_dQ, taccdQrdQ, taccdQsdQ)
        # Step 4: Copy dQ from smem to register to prepare for coalesced write to gmem
        cute.arch.barrier()  # make sure all smem stores are done
        gmem_thr_copy_dQ = gmem_tiled_copy_dQ.get_slice(tidx)
        tdQgdQ = gmem_thr_copy_dQ.partition_S(gdQ)
        tdQsdQ = gmem_thr_copy_dQ.partition_D(sdQ)
        tdQrdQ = cute.make_fragment_like(tdQsdQ, self.dtype)
        # TODO: check OOB when reading from smem if kBlockM isn't evenly tiled
        cute.autovec_copy(tdQsdQ, tdQrdQ)
        # Step 5: Copy dQ from register to gmem
        tdQcdQ = gmem_thr_copy_dQ.partition_S(cdQ)
        # Column (head-dim) predicate; rows are predicated against seqlen below.
        tdQpdQ = utils.predicate_k(tdQcdQ, limit=head_dim)
        for rest_m in cutlass.range(cute.size(tdQrdQ.shape[1]), unroll_full=True):
            if tdQcdQ[0, rest_m, 0][0] < seqlen_q - m_block * self.tile_m:
                cute.copy(
                    gmem_tiled_copy_dQ,
                    tdQrdQ[None, rest_m, None],
                    tdQgdQ[None, rest_m, None],
                    pred=tdQpdQ[None, rest_m, None],
                )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_bwd_postprocess.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 537,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/flash_fwd.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
# A reimplementation of
# https://github.com/Dao-AILab/flash-attention/blob/main/hopper/flash_fwd_kernel_sm80.h
# and https://github.com/Dao-AILab/flash-attention/blob/main/hopper/flash_fwd_kernel_sm90.h
# from Cutlass C++ to Cute-DSL.
# Built on Cute-DSL example: https://github.com/NVIDIA/cutlass/blob/main/examples/python/CuTeDSL/ampere/flash_attention_v2.py
import math
from types import SimpleNamespace
from typing import Type, Callable, Optional, List
from functools import partial
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
from cutlass import Constexpr, Float32, Int32, const_expr, Boolean
from cutlass.cute.nvgpu import cpasync, warp, warpgroup
import cutlass.utils as utils_basic
from cutlass.utils import LayoutEnum
import cutlass.utils.hopper_helpers as sm90_utils_basic
from quack import copy_utils
from quack import layout_utils
from quack import sm90_utils
from flash_attn.cute import ampere_helpers as sm80_utils
from flash_attn.cute.cute_dsl_utils import assume_tensor_aligned
from flash_attn.cute import utils
from flash_attn.cute.mask import AttentionMask
from flash_attn.cute.softmax import Softmax, apply_score_mod_inner
from flash_attn.cute.seqlen_info import SeqlenInfoQK
from flash_attn.cute.block_info import BlockInfo
from flash_attn.cute.block_sparsity import BlockSparseTensors
from flash_attn.cute.block_sparse_utils import (
produce_block_sparse_loads,
consume_block_sparse_loads,
)
from flash_attn.cute import pipeline
from flash_attn.cute.pack_gqa import PackGQA
from flash_attn.cute.named_barrier import NamedBarrierFwd
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.tile_scheduler import (
TileSchedulerArguments,
SingleTileScheduler,
SingleTileLPTScheduler,
SingleTileVarlenScheduler,
)
from cutlass.cute import FastDivmodDivisor
class FlashAttentionForwardBase:
arch: int = 80
def __init__(
    self,
    dtype: Type[cutlass.Numeric],
    head_dim: int,
    head_dim_v: Optional[int] = None,
    qhead_per_kvhead: int = 1,
    is_causal: bool = False,
    is_local: bool = False,
    pack_gqa: bool = True,
    tile_m: int = 128,
    tile_n: int = 128,
    num_stages: int = 1,
    num_threads: int = 128,
    Q_in_regs: bool = False,
    score_mod: Optional[cutlass.Constexpr] = None,
    mask_mod: Optional[cutlass.Constexpr] = None,
    has_aux_tensors: bool = False,
    q_subtile_factor: int | None = None,
):
    """Initializes the configuration for a flash attention kernel.

    All contiguous dimensions must be at least 16 bytes aligned, which means that the head dimension
    should be a multiple of 8.

    :param dtype: element type of Q/K/V/O (Float16 or BFloat16)
    :param head_dim: head dimension
    :type head_dim: int
    :param head_dim_v: head dimension of V/O; defaults to ``head_dim`` when None
    :param qhead_per_kvhead: number of query heads per KV head (GQA ratio)
    :param is_causal: is causal
    :param is_local: use local (sliding-window) attention
    :param pack_gqa: pack GQA query heads together -- TODO confirm exact packing semantics
    :param tile_m: m block size
    :type tile_m: int
    :param tile_n: n block size
    :type tile_n: int
    :param num_stages: number of K/V smem pipeline stages
    :param num_threads: number of threads
    :type num_threads: int
    :param Q_in_regs: keep the Q tile in registers instead of smem
    :param score_mod: A callable that takes the attention scores and applies a modification.
        Callable signature: ``score_mod(scores, batch_idx, head_idx, q_idx, kv_idx, aux_tensors) -> Any``
    :param mask_mod: A callable that takes the attention scores and returns a boolean representing whether that score should be masked.
        Callable signature: ``mask_mod(batch_idx, head_idx, q_idx, kv_idx, aux_tensors) -> Boolean``
    :param has_aux_tensors: whether score_mod/mask_mod receive auxiliary tensors
    :param q_subtile_factor: optional Q sub-tiling factor used by subclasses -- TODO confirm
    """
    self.dtype = dtype
    # padding head_dim to a multiple of 16 as k_block_size
    hdim_multiple_of = 16
    self.tile_hdim = int(math.ceil(head_dim / hdim_multiple_of) * hdim_multiple_of)
    head_dim_v = head_dim_v if head_dim_v is not None else head_dim
    self.same_hdim_kv = head_dim == head_dim_v
    self.tile_hdimv = int(math.ceil(head_dim_v / hdim_multiple_of) * hdim_multiple_of)
    # Can save registers (and hence be faster) if we don't have to check hdim predication
    self.check_hdim_oob = head_dim != self.tile_hdim
    self.check_hdim_v_oob = head_dim_v != self.tile_hdimv
    self.qhead_per_kvhead = qhead_per_kvhead
    self.is_causal = is_causal
    self.is_local = is_local
    self.pack_gqa = pack_gqa
    self.tile_m = tile_m
    self.tile_n = tile_n
    self.num_threads = num_threads
    self.num_stages = num_stages
    self.q_subtile_factor = q_subtile_factor
    self.Q_in_regs = Q_in_regs
    self.score_mod = score_mod
    self.mask_mod = mask_mod
    # QK matmul always accumulates in fp32.
    self.qk_acc_dtype = Float32
    # Vector width for score_mod application; a score_mod may override it via
    # a __vec_size__ attribute, otherwise 1 with aux tensors, else 2.
    self.vec_size: cutlass.Constexpr = getattr(
        score_mod, "__vec_size__", 1 if cutlass.const_expr(has_aux_tensors) else 2
    )
@staticmethod
def can_implement(
    dtype,
    head_dim,
    head_dim_v,
    tile_m,
    tile_n,
    num_stages,
    num_threads,
    is_causal,
    Q_in_regs=False,
) -> bool:
    """Return True when a kernel with these parameters can be built.

    Rejects unsupported dtypes, head dims that are not multiples of 8,
    n-tiles that are not multiples of 16, thread counts that are not whole
    warps, configurations whose Q/K/V smem tiles exceed sm_80 shared-memory
    capacity, and tile_m / num_threads combinations that do not divide evenly.

    :param dtype: element type (must be Float16 or BFloat16)
    :param head_dim: QK head dimension
    :param head_dim_v: V/O head dimension
    :param tile_m: m block size
    :param tile_n: n block size
    :param num_stages: K/V smem pipeline stages
    :param num_threads: threads per CTA
    :param is_causal: causal flag (not used by the feasibility checks)
    :param Q_in_regs: when True, Q and V may alias the same smem buffer
    :rtype: bool
    """
    bytes_per_elem = 2  # fp16 / bf16 only
    alignment_ok = (
        dtype in (cutlass.Float16, cutlass.BFloat16)
        and head_dim % 8 == 0
        and head_dim_v % 8 == 0
        and tile_n % 16 == 0
        and num_threads % 32 == 0
    )
    if not alignment_ok:
        return False
    # Shared memory usage: Q tile + (K tile + V tile), K/V replicated per stage.
    q_bytes = tile_m * head_dim * bytes_per_elem
    k_bytes = tile_n * head_dim * num_stages * bytes_per_elem
    v_bytes = tile_n * head_dim_v * num_stages * bytes_per_elem
    # With Q kept in registers, the Q and V buffers can overlap.
    qv_bytes = max(q_bytes, v_bytes) if Q_in_regs else q_bytes + v_bytes
    # TODO: sm86 and sm89
    if qv_bytes + k_bytes > utils_basic.get_smem_capacity_in_bytes("sm_80"):
        return False
    # Twice the m block size must be divisible by the number of threads.
    return (tile_m * 2) % num_threads == 0
def _check_type(
    self,
    mQ_type: Type[cutlass.Numeric],
    mK_type: Type[cutlass.Numeric],
    mV_type: Type[cutlass.Numeric],
    mO_type: Type[cutlass.Numeric],
    mLSE_type: Type[cutlass.Numeric] | None,
    mCuSeqlensQ_type: Type[cutlass.Numeric] | None,
    mCuSeqlensK_type: Type[cutlass.Numeric] | None,
    mSeqUsedQ_type: Type[cutlass.Numeric] | None,
    mSeqUsedK_type: Type[cutlass.Numeric] | None,
):
    """Validate operand dtypes at trace time.

    Q/K/V/O must share one fp16/bf16 dtype matching ``self.dtype``; the
    optional LSE / cu_seqlens / seqused tensors must be Float32 / Int32
    respectively, or absent (None).

    :raises TypeError: on any dtype mismatch
    """
    # Get the data type and check if it is fp16 or bf16
    if const_expr(not (mQ_type == mK_type == mV_type == mO_type)):
        raise TypeError("All tensors must have the same data type")
    if const_expr(mQ_type not in [cutlass.Float16, cutlass.BFloat16]):
        raise TypeError("Only Float16 or BFloat16 is supported")
    # Optional tensors: each is either absent (None) or of the required dtype.
    optional_tensor_checks = (
        (mLSE_type, Float32, "LSE tensor must be Float32"),
        (mCuSeqlensQ_type, Int32, "cu_seqlens_q tensor must be Int32"),
        (mCuSeqlensK_type, Int32, "cu_seqlens_k tensor must be Int32"),
        (mSeqUsedQ_type, Int32, "seqused_q tensor must be Int32"),
        (mSeqUsedK_type, Int32, "seqused_k tensor must be Int32"),
    )
    for tensor_type, required_type, message in optional_tensor_checks:
        if const_expr(tensor_type not in [None, required_type]):
            raise TypeError(message)
    assert mQ_type == self.dtype
def _setup_attributes(self):
    """Derive smem layouts and gmem tiled copies for Q/K/V/O from the tile sizes.

    Populates ``sQ_layout`` / ``sK_layout`` / ``sV_layout`` / ``sO_layout`` /
    ``sP_layout`` and ``gmem_tiled_copy_{Q,K,V,O}``. Relies on the
    arch-specific subclass for the layout atoms and for the thread-count
    attributes (num_Q_load_threads, num_producer_threads,
    num_epilogue_threads -- presumably set by subclasses, not visible here).
    """
    # ///////////////////////////////////////////////////////////////////////////////
    # Shared memory layout: Q/K/V
    # ///////////////////////////////////////////////////////////////////////////////
    sQ_layout_atom, sK_layout_atom, sV_layout_atom, sO_layout_atom, sP_layout_atom = (
        self._get_smem_layout_atom()
    )
    self.sQ_layout = cute.tile_to_shape(
        sQ_layout_atom,
        (self.tile_m, self.tile_hdim),
        (0, 1),
    )
    # K and V carry a third mode for the smem pipeline stages.
    self.sK_layout = cute.tile_to_shape(
        sK_layout_atom,
        (self.tile_n, self.tile_hdim, self.num_stages),
        (0, 1, 2),
    )
    self.sV_layout = cute.tile_to_shape(
        sV_layout_atom,
        (self.tile_n, self.tile_hdimv, self.num_stages),
        (0, 1, 2),
    )
    self.sO_layout = cute.tile_to_shape(
        sO_layout_atom,
        (self.tile_m, self.tile_hdimv),
        (0, 1),
    )
    if const_expr(sP_layout_atom is not None):
        self.sP_layout = cute.tile_to_shape(
            sP_layout_atom,
            (self.tile_m, self.tile_n),
            (0, 1),
        )
    else:
        self.sP_layout = None
    # ///////////////////////////////////////////////////////////////////////////////
    # GMEM Tiled copy:
    # ///////////////////////////////////////////////////////////////////////////////
    # Thread layouts for copies
    universal_copy_bits = 128
    # Elements moved per 128-bit vectorized copy instruction.
    async_copy_elems = universal_copy_bits // self.dtype.width
    # atom_async_copy: async copy atom for QKV load
    atom_async_copy = cute.make_copy_atom(
        cpasync.CopyG2SOp(cache_mode=cpasync.LoadCacheMode.GLOBAL),
        self.dtype,
        num_bits_per_copy=universal_copy_bits,
    )
    # atom_universal_copy: universal copy atom for O store
    atom_universal_copy = cute.make_copy_atom(
        cute.nvgpu.CopyUniversalOp(),
        self.dtype,
        num_bits_per_copy=universal_copy_bits,
    )
    # tQ_layout and tK_layout: thread layout for QK load
    tQK_shape_dim_1 = sQ_layout_atom.outer.shape[1] // async_copy_elems
    assert self.num_Q_load_threads % tQK_shape_dim_1 == 0, (
        "num_threads must be divisible by tQK_shape_dim_1"
    )
    assert self.num_producer_threads % tQK_shape_dim_1 == 0, (
        "num_threads must be divisible by tQK_shape_dim_1"
    )
    tQ_layout = cute.make_ordered_layout(
        (self.num_Q_load_threads // tQK_shape_dim_1, tQK_shape_dim_1),
        order=(1, 0),
    )
    tK_layout = cute.make_ordered_layout(
        (self.num_producer_threads // tQK_shape_dim_1, tQK_shape_dim_1),
        order=(1, 0),
    )
    # So that we don't have to check if we overshoot kBlockM when we load Q
    assert self.tile_m % tQ_layout.shape[0] == 0
    tV_shape_dim_1 = sV_layout_atom.outer.shape[1] // async_copy_elems
    tV_layout = cute.make_ordered_layout(
        (self.num_producer_threads // tV_shape_dim_1, tV_shape_dim_1),
        order=(1, 0),
    )
    # TODO: need a different layout for O if O dtype is not the same as V dtype
    # tO_layout: thread layout for O store
    tO_layout = cute.make_ordered_layout(
        (self.num_epilogue_threads // tV_shape_dim_1, tV_shape_dim_1),
        order=(1, 0),
    )
    # So that we don't have to check if we overshoot kBlockM when we store O
    assert self.tile_m % tO_layout.shape[0] == 0
    # Value layouts for copies
    vQKV_layout = cute.make_layout((1, async_copy_elems))
    vO_layout = vQKV_layout
    self.gmem_tiled_copy_Q = cute.make_tiled_copy_tv(atom_async_copy, tQ_layout, vQKV_layout)
    self.gmem_tiled_copy_K = cute.make_tiled_copy_tv(atom_async_copy, tK_layout, vQKV_layout)
    self.gmem_tiled_copy_V = cute.make_tiled_copy_tv(atom_async_copy, tV_layout, vQKV_layout)
    # gmem_tiled_copy_O: tiled copy for O store
    self.gmem_tiled_copy_O = cute.make_tiled_copy_tv(atom_universal_copy, tO_layout, vO_layout)
def _get_smem_layout_atom(self):
    """Return the (sQ, sK, sV, sO, sP) smem layout atoms; arch-specific, provided by subclasses."""
    raise NotImplementedError()
def _get_tiled_mma(self):
    """Return the (QK, PV) tiled MMAs; arch-specific, provided by subclasses."""
    raise NotImplementedError()
def _get_shared_storage_cls(self):
    """Return the cute.struct class describing this kernel's smem storage; provided by subclasses."""
    raise NotImplementedError()
@cute.jit
def __call__(
    self,
    mQ: cute.Tensor,
    mK: cute.Tensor,
    mV: cute.Tensor,
    mO: cute.Tensor,
    mLSE: Optional[cute.Tensor],
    softmax_scale: Float32,
    stream: cuda.CUstream,
):
    """Configures and launches the flash attention kernel.

    mQ/mK/mV/mO has same data types(supports fp16 and bf16) and same layout:
    (batch_size, seqlen_q, num_head, head_dim):(_, _, _, 1)

    Abstract entry point: arch-specific subclasses override this with the
    actual configuration and launch logic.
    """
    raise NotImplementedError()
@cute.jit
def epilogue(
    self,
    acc_O: cute.Tensor,
    lse: cute.Tensor,
    mO: cute.Tensor,
    mLSE: Optional[cute.Tensor],
    sO: cute.Tensor,
    seqlen: SeqlenInfoQK,
    gmem_tiled_copy_O: cute.TiledCopy,
    tma_atom_O: Optional[cute.CopyAtom],
    tiled_mma: cute.TiledMma,
    tidx: Int32,
    m_block: Int32,
    head_idx: Int32,
    batch_idx: Int32,
):
    """Write the output tile O (and optionally LSE) for one m-block to gmem.

    Converts the fp32 accumulator to the output dtype, stages it through smem
    for a coalesced store, then writes out either via TMA (when self.use_tma_O
    -- presumably set by the subclass) or via a predicated gmem tiled copy.
    Handles both the regular and the pack-GQA layouts, and both fixed-length
    and varlen (cu_seqlens) addressing.
    """
    # store acc_O
    rO = cute.make_fragment_like(acc_O, self.dtype)
    rO.store(acc_O.load().to(self.dtype))
    # Make sure all threads have finished reading V
    cute.arch.barrier(
        barrier_id=int(NamedBarrierFwd.Epilogue), number_of_threads=self.num_epilogue_threads
    )
    smem_copy_atom_O = utils.get_smem_store_atom(self.arch, self.dtype)
    smem_thr_copy_O = cute.make_tiled_copy_C(smem_copy_atom_O, tiled_mma).get_slice(tidx)
    taccOrO = smem_thr_copy_O.retile(rO)
    taccOsO = smem_thr_copy_O.partition_D(sO)
    # taccOsO = copy_utils.partition_D_position_independent(smem_thr_copy_O, sO)
    # copy acc O from rmem to smem with the smem copy atom
    cute.copy(smem_copy_atom_O, taccOrO, taccOsO)
    # Identity tensor: (row, col) coordinates used for seqlen/hdim predication.
    cO = cute.make_identity_tensor((self.tile_m, self.tile_hdimv))
    pack_gqa = PackGQA(
        self.tile_m, self.tile_hdimv, self.check_hdim_v_oob, self.qhead_per_kvhead
    )
    # Write LSE from rmem -> gmem
    if const_expr(mLSE is not None):
        if const_expr(not seqlen.has_cu_seqlens_q):
            mLSE_cur = mLSE[None, head_idx, batch_idx]
        else:
            # Varlen: offset into the packed seqlen dimension; pack-GQA uses a 2-mode offset.
            offset = seqlen.offset_q if const_expr(not self.pack_gqa) else (0, seqlen.offset_q)
            mLSE_cur = cute.domain_offset((offset,), mLSE[None, head_idx])
        if const_expr(not self.pack_gqa):
            gLSE = cute.local_tile(mLSE_cur, (self.tile_m,), (m_block,))
            # Broadcast LSE along the head dim (stride 0) so it can be
            # partitioned with the same MMA C partitioning as O.
            gLSE_expanded_layout = cute.append(
                gLSE.layout, cute.make_layout((self.tile_hdimv,), stride=(0,))
            )
            gLSE_expanded = cute.make_tensor(gLSE.iterator, gLSE_expanded_layout)
            thr_mma = tiled_mma.get_slice(tidx)
            taccOgLSE = layout_utils.reshape_acc_to_mn(thr_mma.partition_C(gLSE_expanded))
            assert cute.size(taccOgLSE, mode=[0]) == cute.size(lse)
            taccOcO = layout_utils.reshape_acc_to_mn(thr_mma.partition_C(cO))
            t0accOcO = layout_utils.reshape_acc_to_mn(thr_mma.get_slice(0).partition_C(cO))
            # Only the thread corresponding to column 0 writes out the lse to gmem
            if taccOcO[0][1] == 0:
                for m in cutlass.range_constexpr(cute.size(taccOgLSE.shape[1])):
                    # Row predicate via thread-0 coordinates (compile-time known)
                    # minus this thread's row offset.
                    if (
                        t0accOcO[m, 0][0]
                        < seqlen.seqlen_q - m_block * self.tile_m - taccOcO[0][0]
                    ):
                        taccOgLSE[m, 0] = lse[m]
        else:
            pack_gqa.store_LSE(mLSE_cur, lse, tiled_mma, tidx, m_block, seqlen.seqlen_q)
    if const_expr(not seqlen.has_cu_seqlens_q):
        mO_cur = mO[None, None, head_idx, batch_idx]
    else:
        offset = seqlen.offset_q if const_expr(not self.pack_gqa) else (0, seqlen.offset_q)
        mO_cur = cute.domain_offset((offset, 0), mO[None, None, head_idx])
    # thr_mma = tiled_mma.get_slice(tidx)
    # taccOgO = thr_mma.partition_C(gO)
    # cute.autovec_copy(rO, taccOgO)
    # sync to make sure all smem stores are done
    if const_expr(self.use_tma_O):
        # ensure smem writes are visible to TMA
        cute.arch.fence_view_async_shared()
        cute.arch.barrier_arrive(
            barrier_id=int(NamedBarrierFwd.Epilogue),
            number_of_threads=self.num_epilogue_threads + cute.arch.WARP_SIZE,
        )
        gO = cute.local_tile(mO_cur, (self.tile_m, self.tile_hdimv), (m_block, 0))
        store_O, _, _ = copy_utils.tma_get_copy_fn(
            tma_atom_O, 0, cute.make_layout(1), sO, gO, single_stage=True
        )
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        # A single warp (index 4) issues the bulk TMA store once all
        # epilogue threads have arrived at the barrier.
        if warp_idx == 4:
            cute.arch.barrier(
                barrier_id=int(NamedBarrierFwd.Epilogue),
                number_of_threads=self.num_epilogue_threads + cute.arch.WARP_SIZE,
            )
            store_O()
            cute.arch.cp_async_bulk_commit_group()
            cute.arch.cp_async_bulk_wait_group(0, read=True)
    else:
        cute.arch.barrier(
            barrier_id=int(NamedBarrierFwd.Epilogue),
            number_of_threads=self.num_epilogue_threads,
        )
        gmem_thr_copy_O = gmem_tiled_copy_O.get_slice(tidx)
        tOsO = gmem_thr_copy_O.partition_S(sO)
        tOrO = cute.make_fragment_like(tOsO, self.dtype)
        # load acc O from smem to rmem for wider vectorization
        cute.autovec_copy(tOsO, tOrO)
        if const_expr(not self.pack_gqa):
            gO = cute.local_tile(mO_cur, (self.tile_m, self.tile_hdimv), (m_block, 0))
            tOgO = gmem_thr_copy_O.partition_D(gO)
            tOcO = gmem_thr_copy_O.partition_S(cO)
            t0OcO = gmem_tiled_copy_O.get_slice(0).partition_S(cO)
            # Head-dim predicate for the final (possibly partial) column chunk.
            tOpO = utils.predicate_k(tOcO, limit=mO.shape[1])
            # copy acc O from rmem to gmem
            for rest_m in cutlass.range_constexpr(cute.size(tOrO.shape[1])):
                if (
                    t0OcO[0, rest_m, 0][0]
                    < seqlen.seqlen_q - m_block * self.tile_m - tOcO[0][0]
                ):
                    cute.copy(
                        gmem_tiled_copy_O,
                        tOrO[None, rest_m, None],
                        tOgO[None, rest_m, None],
                        pred=tOpO[None, rest_m, None]
                        if const_expr(self.check_hdim_v_oob)
                        else None,
                    )
        else:
            pack_gqa.store_O(mO_cur, tOrO, gmem_tiled_copy_O, tidx, m_block, seqlen.seqlen_q)
@cute.jit
def advance_pipeline(self, pipeline_index):
    """Return the next smem pipeline stage index, wrapping back to 0 after num_stages - 1."""
    return pipeline_index + 1 if pipeline_index < self.num_stages - 1 else 0
@cute.jit
def load_Q(
    self,
    gmem_thr_copy: cute.TiledCopy,
    gQ: cute.Tensor,
    sQ: cute.Tensor,
    block: Int32,
    seqlen: Int32,
    headdim: Int32,
):
    """Async-copy the Q tile for m-block ``block`` from gmem into smem.

    Rows beyond ``seqlen`` and head-dim columns beyond ``headdim`` are
    predicated off; out-of-bounds smem is deliberately left uninitialized.
    """
    tQsQ, tQgQ = gmem_thr_copy.partition_D(sQ), gmem_thr_copy.partition_S(gQ)
    # Identity tensor supplies per-element (row, col) coordinates for predication.
    cQ = cute.make_identity_tensor((self.tile_m, self.tile_hdim))
    tQcQ = gmem_thr_copy.partition_S(cQ)
    t0QcQ = gmem_thr_copy.get_slice(0).partition_S(cQ)
    # Head-dim (column) predicate, shared across all row chunks.
    tQpQ = utils.predicate_k(tQcQ, limit=headdim)
    for m in cutlass.range_constexpr(cute.size(tQsQ.shape[1])):
        # Instead of using tQcQ, we using t0QcQ and subtract the offset from the limit
        # (seqlen - block * kBlockM). This is because the entries of t0QcQ are known at compile time.
        if t0QcQ[0, m, 0][0] < seqlen - block * self.tile_m - tQcQ[0][0]:
            cute.copy(
                gmem_thr_copy,
                tQgQ[None, m, None],
                tQsQ[None, m, None],
                pred=tQpQ[None, m, None] if const_expr(self.check_hdim_oob) else None,
            )
    # We don't need to clear the sQ smem tiles since we'll only write out the valid outputs
@cute.jit
def load_K(
    self,
    gmem_tiled_copy: cute.TiledCopy,
    tKgK: cute.Tensor,
    tKsK: cute.Tensor,
    tKcK: cute.Tensor,
    t0KcK: cute.Tensor,
    tKpK: cute.Tensor,
    block: Int32,
    smem_pipe_write: Int32,
    seqlen: Int32,
    need_predicates: cutlass.Constexpr,
):
    """Async-copy the K tile for n-block ``block`` into smem stage ``smem_pipe_write``.

    ``need_predicates`` (compile-time) selects between the row-predicated
    path and a single unpredicated bulk copy; head-dim predication is applied
    only when self.check_hdim_oob.
    """
    # Do we need to check if we overshoot kBlockN when we load K?
    is_even_n_smem_k = self.tile_n % gmem_tiled_copy.tiler_mn[0].shape == 0
    if const_expr(need_predicates or not is_even_n_smem_k):
        # Instead of using tKcK, we using t0KcK and subtract the offset from the limit
        # (seqlen - block * kBlockN). This is because the entries of t0KcK are known at compile time.
        if const_expr(is_even_n_smem_k):
            seqlen_limit = seqlen - block * self.tile_n
        else:
            if const_expr(not need_predicates):
                seqlen_limit = self.tile_n
            else:
                seqlen_limit = cutlass.min(seqlen - block * self.tile_n, self.tile_n)
        # Shift the limit by this thread's row offset so t0KcK can be compared directly.
        seqlen_limit -= tKcK[0][0]
        for n in cutlass.range_constexpr(cute.size(tKsK.shape[1])):
            if t0KcK[0, n, 0][0] < seqlen_limit:
                cute.copy(
                    gmem_tiled_copy,
                    tKgK[None, n, None, block],
                    tKsK[
                        None, n, None, smem_pipe_write if const_expr(self.num_stages > 1) else 0
                    ],
                    pred=tKpK[None, n, None] if const_expr(self.check_hdim_oob) else None,
                )
        # We don't need to clear the sK smem tiles since we'll mask out the scores anyway.
    else:
        cute.copy(
            gmem_tiled_copy,
            tKgK[None, None, None, block],
            tKsK[None, None, None, smem_pipe_write if const_expr(self.num_stages > 1) else 0],
            pred=tKpK if const_expr(self.check_hdim_oob) else None,
        )
@cute.jit
def load_V(
    self,
    gmem_tiled_copy: cute.TiledCopy,
    tVgV: cute.Tensor,
    tVsV: cute.Tensor,
    tVcV: cute.Tensor,
    t0VcV: cute.Tensor,
    tVpV: cute.Tensor,
    block: Int32,
    smem_pipe_write: Int32,
    seqlen: Int32,
    need_predicates: cutlass.Constexpr,
):
    """Async-copy the V tile for n-block ``block`` into smem stage ``smem_pipe_write``.

    Unlike K, out-of-range V rows must not be loaded at all (their garbage
    would enter the PV matmul), so when predicates are needed the row and
    head-dim predicates are combined into a single per-element predicate
    fragment for the copy.
    """
    # Do we need to check if we overshoot kBlockN when we load V?
    is_even_n_smem_v = self.tile_n % gmem_tiled_copy.tiler_mn[0].shape == 0
    if const_expr(need_predicates or not is_even_n_smem_v):
        for n in cutlass.range_constexpr(cute.size(tVsV.shape[1])):
            # If kBlockN doesn't evenly divide the tiled copy, only the last `n` needs to be checked
            if (
                is_even_n_smem_v
                or n < cute.size(tVsV.shape[1]) - 1
                or tVcV[0, n, 0][0] < self.tile_n
            ):
                # Base predicate: head-dim bounds only (or none at all).
                predicate = tVpV[None, n, None] if const_expr(self.check_hdim_v_oob) else None
                if const_expr(need_predicates):
                    # Fold the row (seqlen) check into an element-wise predicate fragment.
                    seqlen_limit = seqlen - block * self.tile_n - tVcV[0][0]
                    predicate_n = t0VcV[0, n, 0][0] < seqlen_limit
                    predicate = cute.make_fragment_like(tVpV[None, 0, None])
                    for k in cutlass.range_constexpr(cute.size(predicate.shape[1])):
                        for i in cutlass.range_constexpr(cute.size(predicate.shape[0])):
                            predicate[i, k] = (
                                tVpV[i, n, k] if const_expr(self.check_hdim_v_oob) else True
                            ) and predicate_n
                cute.copy(
                    gmem_tiled_copy,
                    tVgV[None, n, None, block],
                    tVsV[
                        None, n, None, smem_pipe_write if const_expr(self.num_stages > 1) else 0
                    ],
                    pred=predicate,
                )
    else:
        cute.copy(
            gmem_tiled_copy,
            tVgV[None, None, None, block],
            tVsV[None, None, None, smem_pipe_write if const_expr(self.num_stages > 1) else 0],
            pred=tVpV if const_expr(self.check_hdim_v_oob) else None,
        )
class FlashAttentionForwardSm80(FlashAttentionForwardBase):
    def _get_smem_layout_atom(self):
        """Pick smem layout atoms: Q and K share one atom (same head dim),
        V and O share another; Sm80 keeps P in registers, so no sP atom."""
        qk_atom = sm80_utils.get_smem_layout_atom(self.dtype, self.tile_hdim)
        vo_atom = sm80_utils.get_smem_layout_atom(self.dtype, self.tile_hdimv)
        return qk_atom, qk_atom, vo_atom, vo_atom, None
def _get_tiled_mma(self):
tiled_mma_qk = cute.make_tiled_mma(
warp.MmaF16BF16Op(self.dtype, Float32, (16, 8, 16)),
(self.num_threads // 32, 1, 1),
permutation_mnk=(self.num_threads // 32 * 16, 16, 16),
)
tiled_mma_pv = cute.make_tiled_mma(
warp.MmaF16BF16Op(self.dtype, Float32, (16, 8, 16)),
(self.num_threads // 32, 1, 1),
permutation_mnk=(self.num_threads // 32 * 16, 16, 16),
)
return tiled_mma_qk, tiled_mma_pv
    def _get_shared_storage_cls(self):
        """Build the @cute.struct describing this kernel's shared-memory layout.

        Field order is significant: it fixes the relative smem placement the
        kernel relies on (e.g. the epilogue overlays sO on sQ's allocation).
        When Q is kept in registers, Q and V share one buffer sized for the
        larger of the two.
        """
        sQ_struct, sK_struct, sV_struct = [
            cute.struct.Align[cute.struct.MemRange[self.dtype, cute.cosize(layout)], 1024]
            for layout in (self.sQ_layout, self.sK_layout, self.sV_layout)
        ]
        # Shared Q/V buffer must hold whichever of the two is larger.
        cosize_sQV = max(cute.cosize(self.sQ_layout), cute.cosize(self.sV_layout))
        sQV_struct = cute.struct.Align[cute.struct.MemRange[self.dtype, cosize_sQV], 1024]

        @cute.struct
        class SharedStorageQKV:
            sV: sV_struct
            sQ: sQ_struct
            sK: sK_struct

        @cute.struct
        class SharedStorageSharedQV:
            sQ: sQV_struct
            sK: sK_struct

        return SharedStorageQKV if const_expr(not self.Q_in_regs) else SharedStorageSharedQV
@cute.jit
def __call__(
self,
mQ: cute.Tensor,
mK: cute.Tensor,
mV: cute.Tensor,
mO: cute.Tensor,
mLSE: Optional[cute.Tensor],
stream: cuda.CUstream,
softmax_scale: Optional[Float32] = None,
window_size_left: Optional[Int32] = None,
window_size_right: Optional[Int32] = None,
learnable_sink: Optional[cute.Tensor] = None,
aux_tensors=None,
):
"""Configures and launches the flash attention kernel.
mQ/mK/mV/mO has same data types(supports fp16 and bf16) and same layout:
(batch_size, seqlen_q, num_head, head_dim):(_, _, _, 1)
"""
assert learnable_sink is None, "Learnable sink is not supported in this kernel"
self._check_type(
*(t.element_type if t is not None else None for t in (mQ, mK, mV, mO, mLSE))
)
tiled_mma_qk, tiled_mma_pv = self._get_tiled_mma()
self.num_mma_threads = tiled_mma_pv.size
self.num_producer_threads = self.num_threads
self.num_Q_load_threads = self.num_threads
self.num_epilogue_threads = self.num_threads
# self.use_tma_O = self.arch >= 90 and mCuSeqlensQ is None
self.use_tma_O = self.arch >= 90
self._setup_attributes()
SharedStorage = self._get_shared_storage_cls()
mQ, mK, mV, mO = [assume_tensor_aligned(t) for t in (mQ, mK, mV, mO)]
mQ, mK, mV, mO = [
cute.make_tensor(t.iterator, cute.select(t.layout, mode=[1, 3, 2, 0]))
for t in (mQ, mK, mV, mO)
]
mLSE = cute.make_tensor(mLSE.iterator, cute.select(mLSE.layout, mode=[2, 1, 0]))
# grid_dim: (m_block, num_head, batch_size)
grid_dim = (
cute.ceil_div(mQ.shape[0], self.tile_m),
cute.size(mQ.shape[2]),
cute.size(mQ.shape[3]),
)
LOG2_E = math.log2(math.e)
if const_expr(self.score_mod is None):
softmax_scale_log2 = Float32(softmax_scale * LOG2_E)
softmax_scale = None
else:
# NB: If a user passes in a score mod, we want to apply the score-mod in the sm_scaled qk
# But in the original base 10. We hijack softmax_scale_log2 to just be the change of base
# and correctly apply the softmax_scale prior to score_mod in the softmax step
softmax_scale_log2 = Float32(LOG2_E)
softmax_scale = Float32(softmax_scale)
fastdiv_mods = None
if const_expr(aux_tensors is not None):
seqlen_q = cute.size(mQ.shape[0]) // (
self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1
)
seqlen_k = cute.size(mK.shape[0])
seqlen_q_divmod = FastDivmodDivisor(seqlen_q)
seqlen_k_divmod = FastDivmodDivisor(seqlen_k)
fastdiv_mods = (seqlen_q_divmod, seqlen_k_divmod)
self.kernel(
mQ,
mK,
mV,
mO,
mLSE,
softmax_scale_log2,
softmax_scale,
window_size_left,
window_size_right,
self.sQ_layout,
self.sK_layout,
self.sV_layout,
self.sO_layout,
self.sP_layout,
self.gmem_tiled_copy_Q,
self.gmem_tiled_copy_K,
self.gmem_tiled_copy_V,
self.gmem_tiled_copy_O,
tiled_mma_qk,
tiled_mma_pv,
SharedStorage,
aux_tensors,
fastdiv_mods,
).launch(
grid=grid_dim,
block=[self.num_threads, 1, 1],
smem=SharedStorage.size_in_bytes(),
stream=stream,
)
@cute.kernel
def kernel(
self,
mQ: cute.Tensor,
mK: cute.Tensor,
mV: cute.Tensor,
mO: cute.Tensor,
mLSE: Optional[cute.Tensor],
softmax_scale_log2: Float32,
softmax_scale: Optional[Float32],
window_size_left: Optional[Int32],
window_size_right: Optional[Int32],
sQ_layout: cute.ComposedLayout,
sK_layout: cute.ComposedLayout,
sV_layout: cute.ComposedLayout,
sO_layout: cute.ComposedLayout,
sP_layout: cute.ComposedLayout | None,
gmem_tiled_copy_Q: cute.TiledCopy,
gmem_tiled_copy_K: cute.TiledCopy,
gmem_tiled_copy_V: cute.TiledCopy,
gmem_tiled_copy_O: cute.TiledCopy,
tiled_mma_qk: cute.TiledMma,
tiled_mma_pv: cute.TiledMma,
SharedStorage: cutlass.Constexpr,
aux_tensors=None,
fastdiv_mods=None,
):
# Thread index, block index
tidx, _, _ = cute.arch.thread_idx()
m_block, num_head, batch_size = cute.arch.block_idx()
block_info = BlockInfo(
self.tile_m,
self.tile_n,
self.is_causal,
self.is_local,
False, # is_split_kv
window_size_left,
window_size_right,
qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
)
seqlen = SeqlenInfoQK.create(seqlen_q_static=mQ.shape[0], seqlen_k_static=mK.shape[0])
n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block)
# TODO: return early if n_block_max == 0
# if self.is_causal:
# if n_block_max <= 0:
# return
n_block = n_block_max - 1
# ///////////////////////////////////////////////////////////////////////////////
# Get the appropriate tiles for this thread block.
# ///////////////////////////////////////////////////////////////////////////////
blkQ_shape = (self.tile_m, self.tile_hdim)
blkK_shape = (self.tile_n, self.tile_hdim)
blkV_shape = (self.tile_n, self.tile_hdimv)
gQ = cute.local_tile(mQ[None, None, num_head, batch_size], blkQ_shape, (m_block, 0))
num_head_kv = num_head // self.qhead_per_kvhead
gK = cute.local_tile(mK[None, None, num_head_kv, batch_size], blkK_shape, (None, 0))
gV = cute.local_tile(mV[None, None, num_head_kv, batch_size], blkV_shape, (None, 0))
# ///////////////////////////////////////////////////////////////////////////////
# Get shared memory buffer
# ///////////////////////////////////////////////////////////////////////////////
smem = cutlass.utils.SmemAllocator()
storage = smem.allocate(SharedStorage)
sQ = storage.sQ.get_tensor(sQ_layout)
sK = storage.sK.get_tensor(sK_layout)
if const_expr(not self.Q_in_regs):
sV = storage.sV.get_tensor(sV_layout)
else:
sV = cute.make_tensor(cute.recast_ptr(sQ.iterator, dtype=self.dtype), sV_layout)
# Transpose view of V to tensor with layout (head_dim_v, tile_n) for tiled mma
sVt = layout_utils.transpose_view(sV)
gmem_thr_copy_K = gmem_tiled_copy_K.get_slice(tidx)
gmem_thr_copy_V = gmem_tiled_copy_V.get_slice(tidx)
# (CPY_Atom, CPY_N, CPY_K, n_block)
tKsK, tKgK = gmem_thr_copy_K.partition_D(sK), gmem_thr_copy_K.partition_S(gK)
# (CPY_Atom, CPY_N, CPY_K, n_block)
tVsV, tVgV = gmem_thr_copy_V.partition_D(sV), gmem_thr_copy_V.partition_S(gV)
# ///////////////////////////////////////////////////////////////////////////////
# Tile MMA compute thread partitions and allocate accumulators
# ///////////////////////////////////////////////////////////////////////////////
thr_mma_qk = tiled_mma_qk.get_slice(tidx)
thr_mma_pv = tiled_mma_pv.get_slice(tidx)
tSrQ = thr_mma_qk.make_fragment_A(thr_mma_qk.partition_A(sQ))
tSrK = thr_mma_qk.make_fragment_B(thr_mma_qk.partition_B(sK[None, None, 0]))
tOrVt = thr_mma_pv.make_fragment_B(thr_mma_pv.partition_B(sVt[None, None, 0]))
acc_shape_O = thr_mma_pv.partition_shape_C((self.tile_m, self.tile_hdimv))
acc_O = cute.make_fragment(acc_shape_O, Float32)
acc_O.fill(0.0)
# ///////////////////////////////////////////////////////////////////////////////
# Smem copy atom tiling
# ///////////////////////////////////////////////////////////////////////////////
smem_copy_atom_QK = cute.make_copy_atom(
warp.LdMatrix8x8x16bOp(transpose=False, num_matrices=4),
self.dtype,
)
smem_copy_atom_V = cute.make_copy_atom(
warp.LdMatrix8x8x16bOp(transpose=True, num_matrices=4),
self.dtype,
)
smem_thr_copy_Q = utils.make_tiled_copy_A(smem_copy_atom_QK, tiled_mma_qk).get_slice(tidx)
smem_thr_copy_K = utils.make_tiled_copy_B(smem_copy_atom_QK, tiled_mma_qk).get_slice(tidx)
smem_thr_copy_V = utils.make_tiled_copy_B(smem_copy_atom_V, tiled_mma_pv).get_slice(tidx)
tSsQ = smem_thr_copy_Q.partition_S(sQ)
tSsK = smem_thr_copy_K.partition_S(sK)
tOsVt = smem_thr_copy_V.partition_S(sVt)
# ///////////////////////////////////////////////////////////////////////////////
# Predicate: Mark indices that need to copy when problem_shape isn't a multiple
# of tile_shape
# ///////////////////////////////////////////////////////////////////////////////
# Construct identity layout for KV
cK = cute.make_identity_tensor((self.tile_n, self.tile_hdim))
tKcK = gmem_thr_copy_K.partition_S(cK)
t0KcK = gmem_thr_copy_K.get_slice(0).partition_S(cK)
if const_expr(self.tile_hdim == self.tile_hdimv):
tVcV = tKcK
t0VcV = t0KcK
else:
cV = cute.make_identity_tensor((self.tile_n, self.tile_hdimv))
tVcV = gmem_thr_copy_V.partition_S(cV)
t0VcV = gmem_thr_copy_V.get_slice(0).partition_S(cV)
# Allocate predicate tensors for m and n, here we only allocate the tile of k, and
# use "if" on the mn dimension.
# This is to reduce register pressure and gets 2-3% performance gain.
tKpK = utils.predicate_k(tKcK, limit=mK.shape[1])
if const_expr(self.same_hdim_kv):
tVpV = tKpK
else:
tVpV = utils.predicate_k(tVcV, limit=mV.shape[1])
# shape: (atom_v_m * rest_m)
softmax = Softmax.create(
softmax_scale_log2,
num_rows=acc_O.shape[0][0] * acc_O.shape[1],
softmax_scale=softmax_scale,
)
softmax.reset()
# group parameters for compute_one_n_block
mma_params = SimpleNamespace(
thr_mma_qk=thr_mma_qk,
thr_mma_pv=thr_mma_pv,
tSrQ=tSrQ,
tSrK=tSrK,
tOrVt=tOrVt,
acc_O=acc_O,
)
smem_copy_params = SimpleNamespace(
smem_thr_copy_Q=smem_thr_copy_Q,
smem_thr_copy_K=smem_thr_copy_K,
smem_thr_copy_V=smem_thr_copy_V,
tSsQ=tSsQ,
tSsK=tSsK,
tOsVt=tOsVt,
)
load_K = partial(
self.load_K, gmem_tiled_copy_K, tKgK, tKsK, tKcK, t0KcK, tKpK, seqlen=seqlen.seqlen_k
)
load_V = partial(
self.load_V, gmem_tiled_copy_V, tVgV, tVsV, tVcV, t0VcV, tVpV, seqlen=seqlen.seqlen_k
)
compute_one_n_block = partial(
self.compute_one_n_block,
mma_params=mma_params,
smem_copy_params=smem_copy_params,
softmax=softmax,
load_K=load_K,
load_V=load_V,
score_mod=self.score_mod,
batch_idx=batch_size,
head_idx=num_head,
m_block=m_block,
aux_tensors=aux_tensors,
fastdiv_mods=fastdiv_mods,
)
# ///////////////////////////////////////////////////////////////////////////////
# Prologue
# ///////////////////////////////////////////////////////////////////////////////
# Start async loads of the last mn-tile, where we take care of the mn residue
gmem_thr_copy_Q = gmem_tiled_copy_Q.get_slice(tidx)
self.load_Q(gmem_thr_copy_Q, gQ, sQ, m_block, seqlen=seqlen.seqlen_q, headdim=mQ.shape[1])
cute.arch.cp_async_commit_group()
def preprocess_Q():
cute.arch.cp_async_wait_group(self.num_stages * 2 - 1)
if const_expr(self.Q_in_regs):
cute.arch.barrier()
tSrQ_copy_view = smem_thr_copy_Q.retile(tSrQ)
cute.copy(smem_thr_copy_Q, tSsQ, tSrQ_copy_view)
# If Q_in_regs, we load Q, then load 1 stage of K, then (optionally) rotate Q and
# read from smem_q to registers, then load V.
# If !Q_in_regs, we load Q, load all stages of K & V, then (optionally) rotate Q.
if const_expr(self.Q_in_regs):
load_K(n_block, smem_pipe_write=0, need_predicates=True)
cute.arch.cp_async_commit_group()
preprocess_Q()
cute.arch.barrier() # Make sure all threads have read smem_q before loading V
for stage in cutlass.range_constexpr(self.num_stages):
if const_expr(not self.Q_in_regs or stage > 0):
if stage == 0 or n_block - stage >= 0:
load_K(n_block - stage, smem_pipe_write=stage, need_predicates=stage == 0)
cute.arch.cp_async_commit_group()
if const_expr(stage < self.num_stages - 1):
if stage == 0 or n_block - stage >= 0:
load_V(n_block - stage, smem_pipe_write=stage, need_predicates=stage == 0)
cute.arch.cp_async_commit_group()
if const_expr(not self.Q_in_regs):
preprocess_Q()
# ///////////////////////////////////////////////////////////////////////////////
# Mainloop
# ///////////////////////////////////////////////////////////////////////////////
# Start processing of the first n-block.
# For performance reason, we separate out two kinds of iterations:
# those that need masking on S, and those that don't.
# We need masking on S for the very last block when K and V has length not multiple of tile_n.
# We also need masking on S if it's causal, for the last several blocks.
mask = AttentionMask(
self.tile_m,
self.tile_n,
seqlen.seqlen_q,
seqlen.seqlen_k,
window_size_left,
window_size_right,
self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
)
mask_fn = partial(
mask.apply_mask,
m_block=m_block,
thr_mma=thr_mma_qk,
mask_causal=self.is_causal,
mask_local=self.is_local,
fastdiv_mods=fastdiv_mods if const_expr(self.mask_mod is not None) else None,
)
# First iteration with seqlen masking
smem_pipe_read = Int32(0)
smem_pipe_write = Int32(self.num_stages - 1)
compute_one_n_block(
n_block,
smem_pipe_read,
smem_pipe_write,
is_first_n_block=True,
check_inf=True,
mask_fn=partial(mask_fn, mask_seqlen=True),
)
smem_pipe_read = self.advance_pipeline(smem_pipe_read)
smem_pipe_write = self.advance_pipeline(smem_pipe_write)
# Next couple of iterations with causal masking
if const_expr(self.is_causal or self.is_local):
n_block_min_causal_local_mask = block_info.get_n_block_min_causal_local_mask(
seqlen, m_block, n_block_min
)
for n_tile in cutlass.range(n_block_max - 1 - n_block_min_causal_local_mask, unroll=1):
n_block = n_block_max - 2 - n_tile
compute_one_n_block(
n_block,
smem_pipe_read,
smem_pipe_write,
check_inf=True,
mask_fn=partial(mask_fn, mask_seqlen=False),
)
smem_pipe_read = self.advance_pipeline(smem_pipe_read)
smem_pipe_write = self.advance_pipeline(smem_pipe_write)
# The remaining iterations have no masking
for n_tile in cutlass.range(n_block, unroll=1):
compute_one_n_block(
n_block - n_tile - 1, smem_pipe_read, smem_pipe_write, check_inf=True
)
smem_pipe_read = self.advance_pipeline(smem_pipe_read)
smem_pipe_write = self.advance_pipeline(smem_pipe_write)
# TODO: local
# normalize acc_O by row_sum and calculate the lse
row_scale = softmax.finalize()
softmax.rescale_O(acc_O, row_scale)
# ///////////////////////////////////////////////////////////////////////////////
# Epilogue
# ///////////////////////////////////////////////////////////////////////////////
# reuse sQ's data iterator
sO = cute.make_tensor(sQ.iterator, sO_layout)
self.epilogue(
acc_O,
softmax.row_sum,
mO,
mLSE,
sO,
seqlen,
gmem_tiled_copy_O,
None,
tiled_mma_pv,
tidx,
m_block,
num_head,
batch_size,
)
    @cute.jit
    def compute_one_n_block(
        self,
        n_block: Int32,
        smem_pipe_read: Int32,
        smem_pipe_write: Int32,
        mma_params: SimpleNamespace,
        smem_copy_params: SimpleNamespace,
        softmax: Softmax,
        load_K: Callable,
        load_V: Callable,
        score_mod: Callable | None,
        batch_idx: cutlass.Int32,
        head_idx: cutlass.Int32,
        m_block: cutlass.Int32,
        seqlen: SeqlenInfoQK,
        aux_tensors=None,
        fastdiv_mods=None,
        mask_fn: Optional[Callable] = None,
        is_first_n_block: cutlass.Constexpr = False,
        check_inf: cutlass.Constexpr = True,
    ):
        """Compute one n_block of S/O.
        This function provides different variants for processing the first n block versus
        subsequent blocks.

        Interleaves S = Q @ K^T, online softmax, and O += P @ V with prefetches
        of the next K and V tiles through the cp.async smem pipeline.
        """
        def sync():
            # Wait until at most (num_stages*2 - 2) cp.async groups are in flight,
            # i.e. the tile we are about to read has landed in smem.
            cute.arch.cp_async_wait_group(self.num_stages * 2 - 2)
            cute.arch.barrier()
        acc_shape_S = mma_params.thr_mma_qk.partition_shape_C((self.tile_m, self.tile_n))
        acc_S = cute.make_fragment(acc_shape_S, Float32)
        acc_S.fill(0.0)
        # wait for smem tile QK before mma calculation for S
        sync()
        # need predicates for the first tile
        def load_V_next():
            # Prefetch the V tile num_stages-1 blocks ahead (or this block when
            # single-staged). Captures the *current* smem_pipe_write, before it
            # is advanced below.
            if self.num_stages == 1 or n_block - self.num_stages + 1 >= 0:
                load_V(
                    n_block - self.num_stages + 1,
                    smem_pipe_write,
                    need_predicates=is_first_n_block and self.num_stages == 1,
                )
            cute.arch.cp_async_commit_group()
        load_V_next()
        sm80_utils.gemm(
            mma_params.thr_mma_qk,
            acc_S,
            mma_params.tSrQ,
            mma_params.tSrK,
            smem_copy_params.tSsQ,
            smem_copy_params.tSsK[
                None, None, None, smem_pipe_read if const_expr(self.num_stages > 1) else 0
            ],
            smem_copy_params.smem_thr_copy_Q,
            smem_copy_params.smem_thr_copy_K,
            # hook_fn=load_V_next,
            A_in_regs=self.Q_in_regs,
        )
        if const_expr(score_mod is not None):
            # User-supplied elementwise modification of the (scaled) scores.
            self.apply_score_mod(
                mma_params.thr_mma_qk,
                batch_idx,
                head_idx,
                m_block,
                acc_S,
                n_block,
                seqlen,
                softmax_scale=softmax.softmax_scale,
                aux_tensors=aux_tensors,
                fastdiv_mods=fastdiv_mods,
            )
        # Advance before defining load_K_next so the K prefetch targets the
        # stage just freed by the S gemm above.
        smem_pipe_write = self.advance_pipeline(smem_pipe_write)
        def load_K_next():
            if n_block - self.num_stages >= 0:
                load_K(n_block - self.num_stages, smem_pipe_write, need_predicates=False)
            cute.arch.cp_async_commit_group()
        # wait for smem tile V for O
        if const_expr(self.num_stages == 1):
            sync()
            load_K_next()
        if const_expr(mask_fn is not None):
            mask_fn(acc_S, n_block=n_block)
        row_scale = softmax.online_softmax(acc_S, is_first=is_first_n_block, check_inf=check_inf)
        # Rescale the running output accumulator by the softmax correction factor.
        softmax.rescale_O(mma_params.acc_O, row_scale)
        # Convert probabilities to the compute dtype and reshape into an MMA A-operand.
        rP = cute.make_fragment_like(acc_S, self.dtype)
        rP.store(acc_S.load().to(self.dtype))
        tOrP = layout_utils.reshape_acc_to_frgA(rP)
        if const_expr(self.num_stages > 1):
            sync()
            load_K_next()
        sm80_utils.gemm_rs(
            mma_params.thr_mma_pv,
            mma_params.acc_O,
            tOrP,
            mma_params.tOrVt,
            smem_copy_params.tOsVt[
                None, None, None, smem_pipe_read if const_expr(self.num_stages > 1) else 0
            ],
            smem_copy_params.smem_thr_copy_V,
            # hook_fn=load_K_next,
        )
        # if const_expr(self.num_stages > 1):
        #     load_K_next()
class FlashAttentionForwardSm90(FlashAttentionForwardBase):
    arch = 90

    def __init__(
        self,
        *args,
        intra_wg_overlap: bool = True,
        mma_pv_is_rs: bool = True,
        **kwargs,
    ):
        """Sm90 (Hopper) forward kernel.

        intra_wg_overlap: overlap gemms/softmax within a warp group.
        mma_pv_is_rs: feed P to the PV gemm from registers (vs. staging in smem).
        """
        super().__init__(*args, **kwargs)
        # Align all smem buffers to 1 KiB (TMA-friendly).
        self.buffer_align_bytes = 1024
        self.intra_wg_overlap = intra_wg_overlap
        self.mma_pv_is_rs = mma_pv_is_rs
def _get_smem_layout_atom(self):
sQ_layout_atom = warpgroup.make_smem_layout_atom(
sm90_utils_basic.get_smem_layout_atom(LayoutEnum.ROW_MAJOR, self.dtype, self.tile_hdim),
self.dtype,
)
sK_layout_atom = sQ_layout_atom
sV_layout_atom = warpgroup.make_smem_layout_atom(
sm90_utils_basic.get_smem_layout_atom(
LayoutEnum.ROW_MAJOR, self.dtype, self.tile_hdimv
),
self.dtype,
)
sO_layout_atom = sV_layout_atom
if not self.mma_pv_is_rs:
sP_layout_atom = warpgroup.make_smem_layout_atom(
sm90_utils_basic.get_smem_layout_atom(
LayoutEnum.ROW_MAJOR, self.dtype, self.tile_n
),
self.dtype,
)
else:
sP_layout_atom = None
return sQ_layout_atom, sK_layout_atom, sV_layout_atom, sO_layout_atom, sP_layout_atom
def _get_tiled_mma(self):
tiled_mma_qk = sm90_utils_basic.make_trivial_tiled_mma(
self.dtype,
self.dtype,
warpgroup.OperandMajorMode.K,
warpgroup.OperandMajorMode.K,
Float32,
atom_layout_mnk=(self.tile_m // 64, 1, 1), # Might need (1, 2, 1) for hdim 512
tiler_mn=(64, self.tile_n),
)
tiled_mma_pv = sm90_utils_basic.make_trivial_tiled_mma(
self.dtype,
self.dtype,
warpgroup.OperandMajorMode.K,
warpgroup.OperandMajorMode.MN,
Float32,
atom_layout_mnk=(self.tile_m // 64, 1, 1), # Might need (1, 2, 1) for hdim 512
tiler_mn=(64, self.tile_hdimv),
a_source=warpgroup.OperandSource.RMEM
if self.mma_pv_is_rs
else warpgroup.OperandSource.SMEM,
)
return tiled_mma_qk, tiled_mma_pv
def _get_shared_storage_cls(self):
sQ_struct, sK_struct, sV_struct = [
cute.struct.Align[cute.struct.MemRange[self.dtype, cute.cosize(layout)], self.buffer_align_bytes]
for layout in (self.sQ_layout, self.sK_layout, self.sV_layout)
]
cosize_sQV = max(cute.cosize(self.sQ_layout), cute.cosize(self.sV_layout))
sQV_struct = cute.struct.Align[cute.struct.MemRange[self.dtype, cosize_sQV], 1024]
cosize_sP = cute.cosize(self.sP_layout) if const_expr(self.sP_layout is not None) else 0
sP_struct = cute.struct.Align[cute.struct.MemRange[self.dtype, cosize_sP], 1024]
# 1 for Q, 1 for O, self.num_stages*2 for K, self.num_stages*2 for V,
mbar_ptr_QO_struct = cute.struct.MemRange[cutlass.Int64, 2]
mbar_ptr_K_struct = cute.struct.MemRange[cutlass.Int64, self.num_stages * 2]
mbar_ptr_V_struct = cute.struct.MemRange[cutlass.Int64, self.num_stages * 2]
@cute.struct
class SharedStorageQKV:
mbar_ptr: mbar_ptr_QO_struct
mbar_ptr_K: mbar_ptr_K_struct
mbar_ptr_V: mbar_ptr_V_struct
sV: sV_struct
sQ: sQ_struct
sK: sK_struct
sP: sP_struct
@cute.struct
class SharedStorageSharedQV:
mbar_ptr: mbar_ptr_QO_struct
mbar_ptr_K: mbar_ptr_K_struct
mbar_ptr_V: mbar_ptr_V_struct
sQ: sQV_struct
sK: sK_struct
sP: sP_struct
return SharedStorageQKV if const_expr(not self.Q_in_regs) else SharedStorageSharedQV
    @cute.jit
    def __call__(
        self,
        mQ: cute.Tensor,  # (b, s_q, h, d) or (total_q, h, d) if there is cu_seqlens_q
        mK: cute.Tensor,  # (b_k, s_k, h_k, d) or (total_k, h_k, d) if there is cu_seqlens_k or (num_pages, page_size, h_k, d) if there is page_table
        mV: cute.Tensor,  # (b_k, s_k, h_k, dv) or (total_k, h_k, dv) if there is cu_seqlens_k or (num_pages, page_size, h_k, dv) if there is page_table
        mO: cute.Tensor,  # (b, s_q, h, dv) or (total_q, h, dv) if there is cu_seqlens_q
        mLSE: Optional[cute.Tensor],
        softmax_scale: Float32,
        stream: cuda.CUstream,
        mCuSeqlensQ: Optional[cute.Tensor] = None,
        mCuSeqlensK: Optional[cute.Tensor] = None,
        mSeqUsedQ: Optional[cute.Tensor] = None,
        mSeqUsedK: Optional[cute.Tensor] = None,
        mPageTable: Optional[cute.Tensor] = None,  # (b_k, max_num_pages_per_seq)
        window_size_left: Int32 | int | None = None,
        window_size_right: Int32 | int | None = None,
        learnable_sink: Optional[cute.Tensor] = None,
        blocksparse_tensors: Optional[BlockSparseTensors] = None,
        aux_tensors: Optional[list] = None,
    ):
        """Configures and launches the flash attention kernel.

        mQ/mK/mV/mO has same data types(supports fp16 and bf16) and same layout:
        (batch_size, seqlen_q, num_head, head_dim):(_, _, _, 1)

        Sets up warp-group/thread counts, smem layouts, TMA atoms, the tile
        scheduler, and softmax scaling, then launches the Sm90 kernel.
        """
        self._check_type(
            *(
                t.element_type if t is not None else None
                for t in (mQ, mK, mV, mO, mLSE, mCuSeqlensQ, mCuSeqlensK, mSeqUsedQ, mSeqUsedK)
            )
        )
        mQ, mK, mV, mO = [assume_tensor_aligned(t) for t in (mQ, mK, mV, mO)]
        # Reorder modes to (seqlen, head_dim, num_head[, batch]); varlen tensors are 3-D.
        QO_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensQ is None) else [0, 2, 1]
        mQ, mO = [layout_utils.select(t, QO_layout_transpose) for t in (mQ, mO)]
        KV_layout_transpose = [1, 3, 2, 0] if const_expr(mCuSeqlensK is None) else [0, 2, 1]
        mK, mV = [layout_utils.select(t, KV_layout_transpose) for t in (mK, mV)]
        LSE_layout_transpose = [2, 1, 0] if const_expr(mCuSeqlensQ is None) else [1, 0]
        mLSE = layout_utils.select(mLSE, LSE_layout_transpose) if const_expr(mLSE is not None) else None
        tiled_mma_qk, tiled_mma_pv = self._get_tiled_mma()
        self.num_mma_threads = tiled_mma_qk.size
        self.num_threads_per_warp_group = 128
        self.num_mma_warp_groups = self.num_mma_threads // self.num_threads_per_warp_group
        # One extra warp group acts as the producer (TMA loads).
        self.num_threads = self.num_threads_per_warp_group * (self.num_mma_warp_groups + 1)
        self.num_producer_threads = 32
        self.num_Q_load_threads = self.num_mma_threads  # If not TMA_Q, MMA threads load Q
        self.num_epilogue_threads = self.num_mma_threads
        # Register rebalancing between producer and consumer warp groups.
        self.num_mma_regs = (
            256
            if self.num_mma_warp_groups == 1
            else (240 if self.num_mma_warp_groups == 2 else 160)
        )
        self.num_producer_regs = (
            56 if self.num_mma_warp_groups == 1 else (24 if self.num_mma_warp_groups == 2 else 32)
        )
        # self.num_mma_regs = 232
        # self.num_producer_regs = 40
        self.use_block_sparsity = cutlass.const_expr(blocksparse_tensors is not None)
        self.use_scheduler_barrier = (
            (self.num_mma_warp_groups >= 2 and self.tile_hdim <= 128)
            if const_expr(self.intra_wg_overlap)
            else (self.num_mma_warp_groups == 2)
        )
        # TMA for Q only works when each tile maps to contiguous heads (pack_gqa divisibility).
        self.use_tma_Q = self.arch >= 90 and not (
            self.pack_gqa and self.tile_m % self.qhead_per_kvhead != 0
        )
        self.use_tma_O = (
            self.arch >= 90 and mCuSeqlensQ is None and mSeqUsedQ is None and not self.pack_gqa
        )
        # TODO: rescale_O_before_gemm
        self._setup_attributes()
        # TODO: we prob don't need most of what's in _setup_attributes
        self.sQ_layout, self.sK_layout, self.sV_layout, self.sO_layout = [
            sm90_utils.make_smem_layout(mX.element_type, LayoutEnum.ROW_MAJOR, shape, stage)
            for mX, shape, stage in [
                (mQ, (self.tile_m, self.tile_hdim), None),
                (mK, (self.tile_n, self.tile_hdim), self.num_stages),
                (mV, (self.tile_n, self.tile_hdimv), self.num_stages),
                (mO, (self.tile_m, self.tile_hdimv), None),
            ]
        ]
        self.sP_layout = None
        if const_expr(not self.mma_pv_is_rs):
            # P is staged through smem only when the PV gemm reads A from smem.
            self.sP_layout = sm90_utils.make_smem_layout(
                mV.element_type, LayoutEnum.ROW_MAJOR, (self.tile_m, self.tile_n)
            )
        SharedStorage = self._get_shared_storage_cls()
        if const_expr(self.pack_gqa):
            # Fold the qhead_per_kvhead factor into the seqlen mode so each KV head's
            # query heads are processed together ("packed GQA").
            shape_Q_packed = (
                (self.qhead_per_kvhead, mQ.shape[0]),
                mQ.shape[1],
                mK.shape[2],
                *mQ.shape[3:],
            )
            stride_Q_packed = (
                (mQ.stride[2], mQ.stride[0]),
                mQ.stride[1],
                mQ.stride[2] * self.qhead_per_kvhead,
                *mQ.stride[3:],
            )
            mQ = cute.make_tensor(
                mQ.iterator, cute.make_layout(shape_Q_packed, stride=stride_Q_packed)
            )
            shape_O_packed = (
                (self.qhead_per_kvhead, mO.shape[0]),
                mK.shape[1],
                mK.shape[2],
                *mO.shape[3:],
            )
            stride_O_packed = (
                (mO.stride[2], mO.stride[0]),
                mO.stride[1],
                mO.stride[2] * self.qhead_per_kvhead,
                *mO.stride[3:],
            )
            mO = cute.make_tensor(
                mO.iterator, cute.make_layout(shape_O_packed, stride=stride_O_packed)
            )
            if const_expr(mLSE is not None):
                shape_LSE_packed = (
                    (self.qhead_per_kvhead, mLSE.shape[0]),
                    mK.shape[2],
                    *mLSE.shape[2:],
                )
                stride_LSE_packed = (
                    (mLSE.stride[1], mLSE.stride[0]),
                    mLSE.stride[1] * self.qhead_per_kvhead,
                    *mLSE.stride[2:],
                )
                mLSE = cute.make_tensor(
                    mLSE.iterator, cute.make_layout(shape_LSE_packed, stride=stride_LSE_packed)
                )
        # TMA
        gmem_tiled_copy_Q = cpasync.CopyBulkTensorTileG2SOp()
        gmem_tiled_copy_KV = cpasync.CopyBulkTensorTileG2SOp()  # Might multicast
        gmem_tiled_copy_O = cpasync.CopyBulkTensorTileS2GOp()
        # Bytes per TMA transaction, used to arm the mbarriers in the kernel.
        self.tma_copy_bytes = {
            name: cute.size_in_bytes(mX.element_type, cute.select(layout, mode=[0, 1]))
            for name, mX, layout in [
                ("Q", mQ, self.sQ_layout),
                ("K", mK, self.sK_layout),
                ("V", mV, self.sV_layout),
            ]
        }
        tma_atom_Q, tma_tensor_Q = None, None
        if const_expr(self.use_tma_Q):
            tma_atom_Q, tma_tensor_Q = cpasync.make_tiled_tma_atom(
                gmem_tiled_copy_Q,
                mQ,
                self.sQ_layout,
                (self.tile_m, self.tile_hdim),  # No mcast
            )
        tma_atom_K, tma_tensor_K = cpasync.make_tiled_tma_atom(
            gmem_tiled_copy_KV,
            mK,
            cute.select(self.sK_layout, mode=[0, 1]),
            (self.tile_n, self.tile_hdim),
            1,  # No mcast for now
        )
        tma_atom_V, tma_tensor_V = cpasync.make_tiled_tma_atom(
            gmem_tiled_copy_KV,
            mV,
            cute.select(self.sV_layout, mode=[0, 1]),
            (self.tile_n, self.tile_hdimv),
            1,  # No mcast for now
        )
        tma_atom_O, tma_tensor_O = None, None
        if const_expr(self.use_tma_O):
            tma_atom_O, tma_tensor_O = cpasync.make_tiled_tma_atom(
                gmem_tiled_copy_O,
                mO,
                self.sO_layout,
                (self.tile_m, self.tile_hdimv),  # No mcast
            )
        if const_expr(mCuSeqlensQ is not None or mSeqUsedQ is not None):
            TileScheduler = SingleTileVarlenScheduler
        else:
            # NOTE(review): this picks the LPT scheduler only when causal and not
            # local, while `lpt=` below is True for causal OR local — looks
            # inconsistent; confirm intended condition.
            TileScheduler = (
                SingleTileScheduler
                if const_expr(not self.is_causal or self.is_local)
                else SingleTileLPTScheduler
            )
        tile_sched_args = TileSchedulerArguments(
            cute.ceil_div(cute.size(mQ.shape[0]), self.tile_m),
            cute.size(mQ.shape[2]),
            cute.size(mQ.shape[3])
            if const_expr(mCuSeqlensQ is None)
            else cute.size(mCuSeqlensQ.shape[0] - 1),
            1,  # num_splits
            cute.size(mK.shape[0]),
            mQ.shape[1],
            mV.shape[1],
            total_q=cute.size(mQ.shape[0])
            if const_expr(mCuSeqlensQ is not None)
            else cute.size(mQ.shape[0]) * cute.size(mQ.shape[3]),
            tile_shape_mn=(self.tile_m, self.tile_n),
            mCuSeqlensQ=mCuSeqlensQ,
            mSeqUsedQ=mSeqUsedQ,
            qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
            element_size=self.dtype.width // 8,
            is_persistent=False,
            lpt=self.is_causal or self.is_local,
        )
        tile_sched_params = TileScheduler.to_underlying_arguments(tile_sched_args)
        grid_dim = TileScheduler.get_grid_shape(tile_sched_params)
        LOG2_E = math.log2(math.e)
        if const_expr(self.score_mod is None):
            # Fold the softmax scale into the base-2 exponent used by exp2.
            softmax_scale_log2 = softmax_scale * LOG2_E
            softmax_scale = None
        else:
            # NB: If a user passes in a score mod, we want to apply the score-mod to the
            # sm_scaled qk, in the original base e. We hijack softmax_scale_log2 to just
            # be the change of base and correctly apply the softmax_scale prior to
            # score_mod in the softmax step.
            softmax_scale_log2 = LOG2_E
            softmax_scale = softmax_scale
        if const_expr(window_size_left is not None):
            window_size_left = Int32(window_size_left)
        if const_expr(window_size_right is not None):
            window_size_right = Int32(window_size_right)
        fastdiv_mods = None
        if const_expr(aux_tensors is not None):
            # Precompute fast divisors so score mods can cheaply recover (q, k) indices.
            seqlen_q = cute.size(mQ.shape[0]) // (
                self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1
            )
            seqlen_k = (
                cute.size(mK.shape[0])
                if const_expr(mPageTable is None)
                else mK.shape[0] * mPageTable.shape[1]
            )
            seqlen_q_divmod = FastDivmodDivisor(seqlen_q)
            seqlen_k_divmod = FastDivmodDivisor(seqlen_k)
            fastdiv_mods = (seqlen_q_divmod, seqlen_k_divmod)
        self.kernel(
            tma_tensor_Q if const_expr(self.use_tma_Q) else mQ,
            tma_tensor_K,
            tma_tensor_V,
            tma_tensor_O if const_expr(self.use_tma_O) else mO,
            mLSE,
            mCuSeqlensQ,
            mCuSeqlensK,
            mSeqUsedQ,
            mSeqUsedK,
            tma_atom_Q,
            tma_atom_K,
            tma_atom_V,
            tma_atom_O,
            softmax_scale_log2,
            softmax_scale,
            window_size_left,
            window_size_right,
            learnable_sink,
            blocksparse_tensors,
            self.sQ_layout,
            self.sK_layout,
            self.sV_layout,
            self.sO_layout,
            self.sP_layout,
            self.gmem_tiled_copy_Q,
            self.gmem_tiled_copy_K,
            self.gmem_tiled_copy_V,
            self.gmem_tiled_copy_O,
            tiled_mma_qk,
            tiled_mma_pv,
            tile_sched_params,
            TileScheduler,
            SharedStorage,
            aux_tensors,
            fastdiv_mods,
        ).launch(
            grid=grid_dim,
            block=[self.num_threads, 1, 1],
            stream=stream,
            min_blocks_per_mp=1,
        )
    @cute.kernel
    def kernel(
        self,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        mO: cute.Tensor,
        mLSE: Optional[cute.Tensor],
        mCuSeqlensQ: Optional[cute.Tensor],
        mCuSeqlensK: Optional[cute.Tensor],
        mSeqUsedQ: Optional[cute.Tensor],
        mSeqUsedK: Optional[cute.Tensor],
        tma_atom_Q: Optional[cute.CopyAtom],
        tma_atom_K: Optional[cute.CopyAtom],
        tma_atom_V: Optional[cute.CopyAtom],
        tma_atom_O: Optional[cute.CopyAtom],
        softmax_scale_log2: Float32,
        softmax_scale: Optional[Float32],
        window_size_left: Optional[Int32],
        window_size_right: Optional[Int32],
        learnable_sink: Optional[cute.Tensor],
        blocksparse_tensors: Optional[BlockSparseTensors],
        sQ_layout: cute.ComposedLayout,
        sK_layout: cute.ComposedLayout,
        sV_layout: cute.ComposedLayout,
        sO_layout: cute.ComposedLayout,
        sP_layout: cute.ComposedLayout | None,
        gmem_tiled_copy_Q: cute.TiledCopy,
        gmem_tiled_copy_K: cute.TiledCopy,
        gmem_tiled_copy_V: cute.TiledCopy,
        gmem_tiled_copy_O: cute.TiledCopy,
        tiled_mma_qk: cute.TiledMma,
        tiled_mma_pv: cute.TiledMma,
        tile_sched_params: ParamsBase,
        TileScheduler: cutlass.Constexpr[Callable],
        SharedStorage: cutlass.Constexpr[Callable],
        # NOTE(review): `aux_tensors=Optional[list[cute.Tensor]]` binds the typing
        # object as the *default value* rather than as an annotation. The launch
        # site always passes aux_tensors positionally so the default is never
        # used — confirm this is intentional before changing.
        aux_tensors=Optional[list[cute.Tensor]],
        fastdiv_mods=None,
    ):
        """Device-side kernel entry point.

        Sets up shared memory, mbarriers and the K/V TMA pipelines, then
        specializes warps: warps 0-3 become the TMA producer (``self.load``)
        and the remaining warps become MMA consumers (``self.mma``).
        """
        warp_idx = cute.arch.make_warp_uniform(cute.arch.warp_idx())
        # Prefetch tma descriptor
        if warp_idx == 0:
            for tma_atom in (tma_atom_Q, tma_atom_K, tma_atom_V, tma_atom_O):
                if const_expr(tma_atom is not None):
                    cpasync.prefetch_descriptor(tma_atom)
        smem = cutlass.utils.SmemAllocator()
        storage = smem.allocate(SharedStorage)
        # Mbarrier init
        mbar_ptr_Q = storage.mbar_ptr.data_ptr()
        # Only one warp initializes the Q mbarrier (needed only for the
        # non-TMA cp.async Q load path).
        if warp_idx == 1:
            # if tidx < 2:
            #     # barrierO num threads should be self.num_mma_threads
            #     cute.arch.mbarrier_init(mbar_ptr_Q + tidx, 1 if tidx == 0 else self.num_mma_threads)
            if const_expr(not self.use_tma_Q):
                cute.arch.mbarrier_init(mbar_ptr_Q, self.num_Q_load_threads)
                # cute.arch.mbarrier_init(mbar_ptr_Q + 1, self.num_mma_threads)
        # We rely on pipeline_k and pipeline_v to initialize the mbarrier fence and sync
        pipeline_kv_producer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread
        )
        pipeline_kv_consumer_group = cutlass.pipeline.CooperativeGroup(
            cutlass.pipeline.Agent.Thread, self.num_mma_threads // cute.arch.WARP_SIZE
        )
        # K and V each get their own multi-stage TMA pipeline; pipeline_k
        # defers its sync so that pipeline_v's creation performs it for both.
        pipeline_k = pipeline.PipelineTmaAsync.create(
            barrier_storage=storage.mbar_ptr_K.data_ptr(),
            num_stages=self.num_stages,
            producer_group=pipeline_kv_producer_group,
            consumer_group=pipeline_kv_consumer_group,
            tx_count=self.tma_copy_bytes["K"],
            defer_sync=True,
        )
        pipeline_v = pipeline.PipelineTmaAsync.create(
            barrier_storage=storage.mbar_ptr_V.data_ptr(),
            num_stages=self.num_stages,
            producer_group=pipeline_kv_producer_group,
            consumer_group=pipeline_kv_consumer_group,
            tx_count=self.tma_copy_bytes["V"],
            defer_sync=False
        )
        # ///////////////////////////////////////////////////////////////////////////////
        # Get shared memory buffer
        # ///////////////////////////////////////////////////////////////////////////////
        sQ = storage.sQ.get_tensor(sQ_layout.outer, swizzle=sQ_layout.inner)
        sK = storage.sK.get_tensor(sK_layout.outer, swizzle=sK_layout.inner)
        # When Q is held in registers, V can alias Q's smem region.
        if const_expr(not self.Q_in_regs):
            sV = storage.sV.get_tensor(sV_layout.outer, swizzle=sV_layout.inner)
        else:
            sV = storage.sQ.get_tensor(
                sV_layout.outer, swizzle=sV_layout.inner, dtype=mV.element_type
            )
        # Transpose view of V to tensor with layout (head_dim_v, tile_n) for tiled mma
        sVt = layout_utils.transpose_view(sV)
        sP = None
        if const_expr(sP_layout is not None):
            sP = storage.sP.get_tensor(sP_layout.outer, swizzle=sP_layout.inner)
        # reuse sQ's data iterator
        sO = storage.sQ.get_tensor(sO_layout.outer, swizzle=sO_layout.inner, dtype=self.dtype)
        block_info = BlockInfo(
            self.tile_m,
            self.tile_n,
            self.is_causal,
            self.is_local,
            False,  # is_split_kv
            window_size_left,
            window_size_right,
            qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
        )
        # Factories bound here so producer and consumer construct per-batch
        # seqlen info / masks with identical parameters.
        SeqlenInfoCls = partial(
            SeqlenInfoQK.create,
            seqlen_q_static=mQ.shape[0] if const_expr(not self.pack_gqa) else mQ.shape[0][1],
            seqlen_k_static=mK.shape[0],
            mCuSeqlensQ=mCuSeqlensQ,
            mCuSeqlensK=mCuSeqlensK,
            mSeqUsedQ=mSeqUsedQ,
            mSeqUsedK=mSeqUsedK,
        )
        AttentionMaskCls = partial(
            AttentionMask,
            self.tile_m,
            self.tile_n,
            window_size_left=window_size_left,
            window_size_right=window_size_right,
            qhead_per_kvhead_packgqa=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
        )
        TileSchedulerCls = partial(TileScheduler.create, tile_sched_params)
        # Warp specialization: warps 0-3 (threads 0-127) are the producer and
        # shed registers; the rest are MMA consumers and gain registers.
        if warp_idx < 4:  # Producer
            cute.arch.setmaxregister_decrease(self.num_producer_regs)
            self.load(
                mQ,
                mK,
                mV,
                sQ,
                sK,
                sV,
                tma_atom_Q,
                tma_atom_K,
                tma_atom_V,
                pipeline_k,
                pipeline_v,
                mbar_ptr_Q,
                blocksparse_tensors,
                block_info,
                SeqlenInfoCls,
                TileSchedulerCls,
            )
        else:  # Consumer
            cute.arch.setmaxregister_increase(self.num_mma_regs)
            # ///////////////////////////////////////////////////////////////////////////////
            # Tile MMA compute thread partitions and allocate accumulators
            # ///////////////////////////////////////////////////////////////////////////////
            tidx, _, _ = cute.arch.thread_idx()
            # Rebase thread index past the 128 producer threads so consumer
            # threads are 0-based inside self.mma.
            tidx = tidx - 128
            self.mma(
                tiled_mma_qk,
                tiled_mma_pv,
                mQ,
                mO,
                mLSE,
                sQ,
                sK,
                sVt,
                sP,
                sO,
                learnable_sink,
                pipeline_k,
                pipeline_v,
                mbar_ptr_Q,
                gmem_tiled_copy_Q,
                gmem_tiled_copy_O,
                tma_atom_O,
                tidx,
                softmax_scale_log2,
                softmax_scale,
                block_info,
                SeqlenInfoCls,
                AttentionMaskCls,
                TileSchedulerCls,
                blocksparse_tensors,
                aux_tensors,
                fastdiv_mods,
            )
    @cute.jit
    def load(
        self,
        mQ: cute.Tensor,
        mK: cute.Tensor,
        mV: cute.Tensor,
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sV: cute.Tensor,
        tma_atom_Q: cute.CopyAtom,
        tma_atom_K: cute.CopyAtom,
        tma_atom_V: cute.CopyAtom,
        pipeline_k: cutlass.pipeline.PipelineAsync,
        pipeline_v: cutlass.pipeline.PipelineAsync,
        mbar_ptr_Q: cutlass.Pointer,
        blocksparse_tensors: Optional[BlockSparseTensors],
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        TileSchedulerCls: Callable,
    ):
        """Producer: iterate work tiles and issue TMA loads of Q, K and V.

        Only warp 0 of the producer warp group issues TMA copies. K and V go
        through their respective multi-stage pipelines; Q (when loaded via
        TMA) shares the first K stage's barrier so one wait covers both.
        """
        warp_idx_in_wg = cute.arch.make_warp_uniform(cute.arch.warp_idx()) % 4
        if warp_idx_in_wg == 0:
            # NOTE(review): q_producer_phase is initialized but not read in
            # this function — presumably reserved for a non-TMA Q path; verify.
            q_producer_phase = Int32(1)
            kv_producer_state = pipeline.make_pipeline_state(
                cutlass.pipeline.PipelineUserType.Producer, self.num_stages
            )
            tile_scheduler = TileSchedulerCls()
            work_tile = tile_scheduler.initial_work_tile_info()
            # Persistent-scheduler loop over work tiles.
            while work_tile.is_valid_tile:
                # if work_tile.is_valid_tile:
                m_block, head_idx, batch_idx, _ = work_tile.tile_idx
                seqlen = SeqlenInfoCls(batch_idx)
                mQ_cur = seqlen.offset_batch_Q(mQ, batch_idx, dim=3)[None, None, head_idx]
                # With GQA (and no pack_gqa), multiple Q heads map to one KV head.
                head_idx_kv = (
                    head_idx // self.qhead_per_kvhead if const_expr(not self.pack_gqa) else head_idx
                )
                mK_cur = seqlen.offset_batch_K(mK, batch_idx, dim=3)[None, None, head_idx_kv]
                mV_cur = seqlen.offset_batch_K(mV, batch_idx, dim=3)[None, None, head_idx_kv]
                gK = cute.local_tile(mK_cur, (self.tile_n, self.tile_hdim), (None, 0))
                gV = cute.local_tile(mV_cur, (self.tile_n, self.tile_hdimv), (None, 0))
                load_Q = None
                if const_expr(self.use_tma_Q):
                    gQ = cute.local_tile(mQ_cur, (self.tile_m, self.tile_hdim), (m_block, 0))
                    load_Q, _, _ = copy_utils.tma_get_copy_fn(
                        tma_atom_Q, 0, cute.make_layout(1), gQ, sQ, single_stage=True
                    )
                # TODO: mcast
                # TODO check warp_idx if we have 128 producer threads
                load_K, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_K, 0, cute.make_layout(1), gK, sK
                )
                load_K = copy_utils.tma_producer_copy_fn(load_K, pipeline_k)
                load_V, _, _ = copy_utils.tma_get_copy_fn(
                    tma_atom_V, 0, cute.make_layout(1), gV, sV
                )
                load_V = copy_utils.tma_producer_copy_fn(load_V, pipeline_v)
                if const_expr(not self.use_block_sparsity):
                    n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block)
                    # if cute.arch.thread_idx()[0] == 0:
                    #     cute.printf("m_block = %d, n_block_min: %d, n_block_max: %d", m_block, n_block_min, n_block_max)
                    # First iteration: load both Q & K with the same mbarrier
                    # K blocks are consumed from highest index down to n_block_min.
                    n_block = n_block_max - 1
                    pipeline_k.producer_acquire(
                        kv_producer_state,
                        extra_tx_count=self.tma_copy_bytes["Q"]
                        if const_expr(self.use_tma_Q)
                        else 0,
                    )
                    if const_expr(self.use_tma_Q):
                        load_Q(tma_bar_ptr=pipeline_k.producer_get_barrier(kv_producer_state))
                    load_K(src_idx=n_block, producer_state=kv_producer_state)
                    if const_expr(not self.intra_wg_overlap):
                        # Simple schedule: K then V for the same block each stage.
                        pipeline_v.producer_acquire(kv_producer_state)
                        load_V(src_idx=n_block, producer_state=kv_producer_state)
                        kv_producer_state.advance()
                        for i in cutlass.range(n_block_max - 1 - n_block_min, unroll=1):
                            n_block = n_block_max - 1 - i - 1
                            pipeline_k.producer_acquire(kv_producer_state)
                            load_K(src_idx=n_block, producer_state=kv_producer_state)
                            pipeline_v.producer_acquire(kv_producer_state)
                            load_V(src_idx=n_block, producer_state=kv_producer_state)
                            kv_producer_state.advance()
                    else:
                        # Overlapped schedule: V for block i is loaded one stage
                        # behind K for block i-1, matching the consumer's
                        # intra-warpgroup overlap of the PV and QK gemms.
                        for i in cutlass.range(n_block_max - 1 - n_block_min, unroll=1):
                            n_block_prev = n_block_max - i - 1
                            n_block = n_block_prev - 1
                            kv_producer_state_prev = kv_producer_state.clone()
                            kv_producer_state.advance()
                            pipeline_k.producer_acquire(kv_producer_state)
                            load_K(src_idx=n_block, producer_state=kv_producer_state)
                            pipeline_v.producer_acquire(kv_producer_state_prev)
                            load_V(src_idx=n_block_prev, producer_state=kv_producer_state_prev)
                        # Trailing V load for the final (lowest-index) block.
                        n_block = n_block_min
                        pipeline_v.producer_acquire(kv_producer_state)
                        load_V(src_idx=n_block, producer_state=kv_producer_state)
                        kv_producer_state.advance()
                else:
                    # Block-sparse path: the helper walks the sparse block list
                    # and performs the equivalent acquire/load/advance sequence.
                    kv_producer_state = produce_block_sparse_loads(
                        blocksparse_tensors,
                        batch_idx,
                        head_idx,
                        m_block,
                        kv_producer_state,
                        load_Q,
                        load_K,
                        load_V,
                        pipeline_k,
                        pipeline_v,
                        self.use_tma_Q,
                        self.tma_copy_bytes["Q"],
                        self.intra_wg_overlap,
                        self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                        self.q_subtile_factor if self.q_subtile_factor is not None else 1,
                    )
                tile_scheduler.prefetch_next_work()
                tile_scheduler.advance_to_next_work()
                work_tile = tile_scheduler.get_current_work()
                # End of persistent scheduler loop
    @cute.jit
    def mma(
        self,
        tiled_mma_qk: cute.TiledMma,
        tiled_mma_pv: cute.TiledMma,
        # softmax: Softmax,
        # acc_O: cute.Tensor,
        mQ: cute.Tensor,
        mO: cute.Tensor,
        mLSE: Optional[cute.Tensor],
        sQ: cute.Tensor,
        sK: cute.Tensor,
        sVt: cute.Tensor,
        sP: Optional[cute.Tensor],
        sO: cute.Tensor,
        learnable_sink: Optional[cute.Tensor],
        pipeline_k: cutlass.pipeline.PipelineAsync,
        pipeline_v: cutlass.pipeline.PipelineAsync,
        mbar_ptr_Q: cutlass.Pointer,
        gmem_tiled_copy_Q: cute.TiledCopy,
        gmem_tiled_copy_O: cute.TiledCopy,
        tma_atom_O: Optional[cute.CopyAtom],
        tidx: Int32,
        softmax_scale_log2: Float32,
        softmax_scale: Optional[Float32],
        block_info: BlockInfo,
        SeqlenInfoCls: Callable,
        AttentionMaskCls: Callable,
        TileSchedulerCls: Callable,
        blocksparse_tensors: Optional[BlockSparseTensors],
        aux_tensors: Optional[list],
        fastdiv_mods=None,
    ):
        """Consumer: per work tile, run the QK gemm / online-softmax / PV gemm
        mainloop and write O (and optionally LSE) in the epilogue.

        The mainloop is phased for performance: a first (seqlen-masked)
        iteration, causal/local-masked iterations, unmasked iterations, and
        optionally left-local-masked iterations; block sparsity takes a
        separate helper path.
        """
        warp_group_idx = cute.arch.make_warp_uniform(tidx // self.num_threads_per_warp_group)
        warp_group_thread_layout = cute.make_layout(
            self.num_mma_warp_groups, stride=self.num_threads_per_warp_group
        )
        thr_mma_qk = tiled_mma_qk.get_slice(tidx)
        wg_mma_qk = tiled_mma_qk.get_slice(warp_group_thread_layout(warp_group_idx))
        wg_mma_pv = tiled_mma_pv.get_slice(warp_group_thread_layout(warp_group_idx))
        # Register fragments for the S = Q @ K^T gemm.
        _, tSrQ, tSrK = sm90_utils.partition_fragment_ABC(
            wg_mma_qk, (self.tile_m, self.tile_n, self.tile_hdim), sQ, sK
        )
        mma_qk_fn = partial(
            sm90_utils.gemm_zero_init, tiled_mma_qk, (self.tile_m, self.tile_n), tSrQ, tSrK
        )
        # Accumulator and fragments for the O += P @ V gemm.
        acc_O, tOrP, tOrVt = sm90_utils.partition_fragment_ABC(
            wg_mma_pv, (self.tile_m, self.tile_hdimv, self.tile_n), sP, sVt
        )
        mma_pv_fn = partial(sm90_utils.gemm_w_idx, tiled_mma_pv, acc_O, tOrP, tOrVt)
        # ///////////////////////////////////////////////////////////////////////////////
        # Smem copy atom tiling
        # ///////////////////////////////////////////////////////////////////////////////
        smem_copy_atom_P = utils.get_smem_store_atom(self.arch, self.dtype)
        smem_thr_copy_P = cute.make_tiled_copy_C(smem_copy_atom_P, tiled_mma_qk).get_slice(tidx)
        tPsP = smem_thr_copy_P.partition_D(sP) if const_expr(sP is not None) else None
        smem_copy_params = SimpleNamespace(smem_thr_copy_P=smem_thr_copy_P, tPsP=tPsP)
        self.mma_init()
        # Bind the per-n-block body; the overlap variant interleaves the PV
        # gemm of the previous block with the QK gemm of the current one.
        mma_one_n_block_all = partial(
            self.mma_one_n_block_intrawg_overlap
            if const_expr(self.intra_wg_overlap)
            else self.mma_one_n_block,
            mma_qk_fn=mma_qk_fn,
            pipeline_k=pipeline_k,
            pipeline_v=pipeline_v,
            acc_O=acc_O,
            tOrP=tOrP,
            smem_copy_params=smem_copy_params,
            check_inf=True,
        )
        q_consumer_phase = Int32(0)
        kv_consumer_state = pipeline.make_pipeline_state(
            cutlass.pipeline.PipelineUserType.Consumer, self.num_stages
        )
        tile_scheduler = TileSchedulerCls()
        work_tile = tile_scheduler.initial_work_tile_info()
        softmax = Softmax.create(
            softmax_scale_log2,
            num_rows=acc_O.shape[0][0] * acc_O.shape[1],
            softmax_scale=softmax_scale,
        )
        process_first_half_block = partial(
            self.first_half_block_overlap,
            mma_qk_fn=mma_qk_fn,
            pipeline_k=pipeline_k,
            tOrP=tOrP,
            smem_copy_params=smem_copy_params,
            softmax=softmax,
        )
        process_last_half_block = partial(
            self.last_half_block_overlap,
            pipeline_v=pipeline_v,
            mma_pv_fn=mma_pv_fn,
        )
        # Persistent-scheduler loop over work tiles.
        while work_tile.is_valid_tile:
            # if work_tile.is_valid_tile:
            # shape: (atom_v_m * rest_m)
            m_block, head_idx, batch_idx, _ = work_tile.tile_idx
            seqlen = SeqlenInfoCls(batch_idx)
            # Recompute fastdiv_mods if necessary for varlen with aux_tensors
            recompute_fastdiv_mods_q = cutlass.const_expr(
                aux_tensors is not None and (seqlen.has_cu_seqlens_q or seqlen.has_seqused_q)
            )
            recompute_fastdiv_mods_k = cutlass.const_expr(
                aux_tensors is not None and (seqlen.has_cu_seqlens_k or seqlen.has_seqused_k)
            )
            if cutlass.const_expr(fastdiv_mods is not None):
                seqlen_q_divmod, seqlen_k_divmod = fastdiv_mods
                fastdiv_mods = (
                    seqlen_q_divmod
                    if not recompute_fastdiv_mods_q
                    else FastDivmodDivisor(seqlen.seqlen_q),
                    seqlen_k_divmod
                    if not recompute_fastdiv_mods_k
                    else FastDivmodDivisor(seqlen.seqlen_k),
                )
            mask = AttentionMaskCls(seqlen)
            mask_fn = partial(
                mask.apply_mask,
                batch_idx=batch_idx,
                head_idx=head_idx,
                m_block=m_block,
                thr_mma=thr_mma_qk,
                mask_causal=self.is_causal,
                mask_local=self.is_local,
                aux_tensors=aux_tensors,
                fastdiv_mods=fastdiv_mods,
            )
            score_mod_fn = None
            if const_expr(self.score_mod is not None):
                score_mod_fn = partial(
                    self.apply_score_mod,
                    thr_mma_qk,
                    batch_idx,
                    head_idx,
                    m_block,
                    softmax_scale=softmax_scale,
                    aux_tensors=aux_tensors,
                    fastdiv_mods=fastdiv_mods,
                )
            mma_one_n_block = partial(
                mma_one_n_block_all,
                seqlen=seqlen,
                softmax=softmax,
                score_mod_fn=score_mod_fn,
            )
            # Load Q if not TMA_Q
            if const_expr(not self.use_tma_Q):
                pack_gqa = PackGQA(
                    self.tile_m, self.tile_hdim, self.check_hdim_oob, self.qhead_per_kvhead
                )
                mQ_cur = seqlen.offset_batch_Q(mQ, batch_idx, dim=3)[None, None, head_idx]
                # gmem_thr_copy_Q = gmem_tiled_copy_Q.get_slice(tidx)
                # gQ = cute.local_tile(mQ_cur, (self.tile_m, self.tile_hdim), (m_block, 0))
                # self.load_Q(gmem_thr_copy_Q, gQ, sQ, m_block, seqlen=seqlen.seqlen_q,
                #             headdim=mQ.shape[1])
                pack_gqa.load_Q(mQ_cur, sQ, gmem_tiled_copy_Q, tidx, m_block, seqlen.seqlen_q)
                cute.arch.cp_async_mbarrier_arrive_noinc(mbar_ptr_Q)
            n_block_min, n_block_max = block_info.get_n_block_min_max(seqlen, m_block)
            if const_expr(not self.use_tma_Q):
                # Wait for the cp.async Q load above; phase flips each tile.
                cute.arch.mbarrier_wait(mbar_ptr_Q, phase=q_consumer_phase)
                q_consumer_phase ^= 1
            # For performance reason, we separate out two kinds of iterations:
            # those that need masking on S, and those that don't.
            # We need masking on S for the very last block when K and V has length not multiple of tile_n.
            # We also need masking on S if it's causal, for the last several blocks.
            # softmax.reset() # Don't need reset as we explicitly call softmax w is_first=True
            O_should_accumulate = False
            # ==========================================
            # MAINLOOP
            # ==========================================
            if const_expr(not self.use_block_sparsity):
                # ==========================================
                # No block-sparsity (original path)
                # ==========================================
                # First iteration with seqlen masking
                if const_expr(self.intra_wg_overlap):
                    kv_consumer_state = process_first_half_block(
                        n_block=n_block_max - 1,
                        seqlen=seqlen,
                        kv_consumer_state=kv_consumer_state,
                        mask_fn=partial(mask_fn, mask_mod=self.mask_mod),
                        score_mod_fn=score_mod_fn,
                        is_first_block=True,
                    )
                    # Need to initialize tOrO in the case of RescaleOBeforeGemm where we will scale tOrO even in the 1st iter
                    # acc_O.fill(0.0)
                else:
                    self.warp_scheduler_barrier_sync()
                    kv_consumer_state = mma_one_n_block(
                        kv_consumer_state,
                        n_block=n_block_max - 1,
                        seqlen=seqlen,
                        mma_pv_fn=partial(mma_pv_fn, zero_init=True),
                        is_first_n_block=True,
                        mask_fn=partial(mask_fn, mask_mod=self.mask_mod, mask_seqlen=True),
                    )
                    O_should_accumulate = True
                # if cute.arch.thread_idx()[0] == 128: cute.printf("m_block = {}, n_block_max = {}, n_block_min = {}", m_block, n_block_max, n_block_min)
                n_block_max -= 1
                # Next couple of iterations with causal masking
                if const_expr(self.is_causal or self.is_local):
                    n_block_min_causal_local_mask = block_info.get_n_block_min_causal_local_mask(
                        seqlen, m_block, n_block_min
                    )
                    # if cute.arch.thread_idx()[0] == 128: cute.printf("n_block_min_causal_local_mask = {}", n_block_min_causal_local_mask)
                    for n_tile in cutlass.range(
                        n_block_max - n_block_min_causal_local_mask, unroll=1
                    ):
                        kv_consumer_state = mma_one_n_block(
                            kv_consumer_state,
                            n_block=n_block_max - 1 - n_tile,
                            seqlen=seqlen,
                            mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                            mask_fn=partial(mask_fn, mask_mod=self.mask_mod, mask_seqlen=False),
                        )
                        O_should_accumulate = True
                    n_block_max = cutlass.min(n_block_max, n_block_min_causal_local_mask)
                # The remaining iterations have no masking
                n_block_min_before_local_mask = block_info.get_n_block_min_before_local_mask(
                    seqlen, m_block, n_block_min
                )
                # if cute.arch.thread_idx()[0] == 128: cute.printf("n_block_min_before_local_mask = {}, n_block_min = {}", n_block_min_before_local_mask, n_block_min)
                for n_tile in cutlass.range(n_block_max - n_block_min_before_local_mask, unroll=1):
                    kv_consumer_state = mma_one_n_block(
                        kv_consumer_state,
                        n_block=n_block_max - 1 - n_tile,
                        seqlen=seqlen,
                        mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                        mask_fn=partial(mask_fn, mask_mod=self.mask_mod, mask_seqlen=False),
                    )
                    O_should_accumulate = True
                # Separate iterations with local masking on the left
                if const_expr(self.is_local and block_info.window_size_left is not None):
                    n_block_max = cutlass.min(n_block_max, n_block_min_before_local_mask)
                    for n_tile in cutlass.range(n_block_max - n_block_min, unroll=1):
                        kv_consumer_state = mma_one_n_block(
                            kv_consumer_state,
                            n_block=n_block_max - 1 - n_tile,
                            seqlen=seqlen,
                            mma_pv_fn=partial(mma_pv_fn, zero_init=not O_should_accumulate),
                            mask_fn=partial(mask_fn, mask_mod=self.mask_mod, mask_seqlen=False),
                        )
                        O_should_accumulate = True
                # Last "half" iteration
                if const_expr(self.intra_wg_overlap):
                    kv_consumer_state = process_last_half_block(
                        kv_consumer_state=kv_consumer_state,
                        zero_init=not O_should_accumulate,
                    )
                    O_should_accumulate = True
                else:
                    self.warp_scheduler_barrier_arrive()
            else:
                # ==========================================
                # Block sparsity
                # ==========================================
                kv_consumer_state, O_should_accumulate, processed_any = consume_block_sparse_loads(
                    blocksparse_tensors,
                    batch_idx,
                    head_idx,
                    m_block,
                    seqlen,
                    kv_consumer_state,
                    mma_pv_fn,
                    mma_one_n_block,
                    process_first_half_block,
                    process_last_half_block,
                    mask_fn,
                    score_mod_fn,
                    O_should_accumulate,
                    self.mask_mod,
                    fastdiv_mods,
                    self.intra_wg_overlap,
                    self.warp_scheduler_barrier_sync,
                    self.warp_scheduler_barrier_arrive,
                    self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
                    self.q_subtile_factor if self.q_subtile_factor is not None else 1,
                )
                # Handle empty case (when no blocks to process)
                if not processed_any:
                    softmax.reset()
                    acc_O.fill(0.0)
            # Optional per-head attention sink added into the softmax sum.
            sink_val = None
            if const_expr(learnable_sink is not None):
                if const_expr(not self.pack_gqa):
                    sink_val = Float32(learnable_sink[head_idx])
                else:  # Each thread might have a different sink value due to different q_head
                    sink_val = cute.make_fragment_like(softmax.row_max, Float32)
                    cS = cute.make_identity_tensor((self.tile_m, self.tile_n))
                    tScS_mn = layout_utils.reshape_acc_to_mn(thr_mma_qk.partition_C(cS))
                    for r in cutlass.range(cute.size(sink_val), unroll_full=True):
                        row = m_block * self.tile_m + tScS_mn[r][0]
                        q_head_idx = row % self.qhead_per_kvhead + head_idx * self.qhead_per_kvhead
                        sink_val[r] = Float32(learnable_sink[q_head_idx])
            # normalize acc_O by row_sum and calculate the lse
            row_scale = softmax.finalize(sink_val=sink_val)
            softmax.rescale_O(acc_O, row_scale)
            # ///////////////////////////////////////////////////////////////////////////////
            # Epilogue
            # ///////////////////////////////////////////////////////////////////////////////
            self.epilogue(
                acc_O,
                softmax.row_sum,
                mO,
                mLSE,
                sO,
                seqlen,
                gmem_tiled_copy_O,
                tma_atom_O,
                tiled_mma_pv,
                tidx,
                m_block,
                head_idx,
                batch_idx,
            )
            tile_scheduler.advance_to_next_work()
            work_tile = tile_scheduler.get_current_work()
    @cute.jit
    def first_half_block_overlap(
        self,
        n_block: Int32,
        mma_qk_fn: Callable,
        kv_consumer_state,
        pipeline_k,
        tOrP: cute.Tensor,
        smem_copy_params: SimpleNamespace,
        softmax: Softmax,
        seqlen: SeqlenInfoQK,
        mask_fn: Callable = None,
        score_mod_fn: Optional[Callable] = None,
        is_first_block: bool = False,
    ):
        """Processes the first half block when using intra-warpgroup-overlap"""
        # QK gemm for this block; wg_wait=0 blocks until the result is ready.
        pipeline_k.consumer_wait(kv_consumer_state, pipeline_k.consumer_try_wait(kv_consumer_state))
        acc_S = mma_qk_fn(B_idx=kv_consumer_state.index, wg_wait=0)
        pipeline_k.consumer_release(kv_consumer_state)
        # Apply score modification if present
        if const_expr(score_mod_fn is not None):
            score_mod_fn(acc_S, n_block=n_block, seqlen=seqlen)
        # Apply mask; mask_seqlen always True for first block
        # Caveat: if full block further right than mask block, seqlen masking is redundant;
        # however, masking is being applied anyway, so essentially no perf hit
        mask_fn(acc_S, n_block=n_block, mask_seqlen=True)
        softmax.online_softmax(acc_S, is_first=is_first_block)
        # Convert the probabilities to the PV gemm's A-fragment layout/dtype.
        tOrP_acc = layout_utils.reshape_acc_to_frgA(acc_S)
        tOrP_cur = (
            tOrP if const_expr(self.mma_pv_is_rs) else cute.make_fragment_like(tOrP_acc, self.dtype)
        )
        tOrP_cur.store(tOrP_acc.load().to(self.dtype))
        # if pv gemm not rs
        if const_expr(not self.mma_pv_is_rs):
            # P must be staged through smem for a smem-sourced PV gemm.
            tPrP = smem_copy_params.smem_thr_copy_P.retile(tOrP_cur)
            cute.copy(smem_copy_params.smem_thr_copy_P, tPrP, smem_copy_params.tPsP)
            # Fence and barrier to make smem store visible to WGMMA
            cute.arch.fence_view_async_shared()
            cute.arch.sync_warp()
        return kv_consumer_state
    @cute.jit
    def last_half_block_overlap(
        self,
        kv_consumer_state,
        pipeline_v,
        mma_pv_fn: Callable,
        zero_init: bool,
    ):
        """Processes the final PV GEMM when using intra-warpgroup-overlap"""
        # The overlap schedule leaves one V stage pending after the mainloop;
        # consume it here, then advance the pipeline state for the next tile.
        pipeline_v.consumer_wait(kv_consumer_state, pipeline_v.consumer_try_wait(kv_consumer_state))
        mma_pv_fn(B_idx=kv_consumer_state.index, zero_init=zero_init, wg_wait=0)
        pipeline_v.consumer_release(kv_consumer_state)
        kv_consumer_state.advance()
        return kv_consumer_state
    @cute.jit
    def mma_one_n_block(
        self,
        smem_pipe_read: cutlass.pipeline.PipelineState | pipeline.PipelineStateSimple,
        n_block: Int32,
        mma_qk_fn: Callable,
        mma_pv_fn: Callable,
        pipeline_k: cutlass.pipeline.PipelineAsync,
        pipeline_v: cutlass.pipeline.PipelineAsync,
        acc_O: cute.Tensor,
        tOrP: cute.Tensor,
        smem_copy_params: SimpleNamespace,
        softmax: Softmax,
        seqlen: SeqlenInfoQK,
        score_mod_fn: Optional[Callable] = None,
        mask_fn: Optional[Callable] = None,
        is_first_n_block: cutlass.Constexpr = False,
        check_inf: cutlass.Constexpr = True,
    ):
        """One mainloop step (non-overlapped schedule): QK gemm, score-mod,
        mask, online softmax, then the PV gemm for the same n_block.
        Returns the advanced pipeline read state."""
        pipeline_k.consumer_wait(smem_pipe_read, pipeline_k.consumer_try_wait(smem_pipe_read))
        # S = Q @ K.T
        acc_S = mma_qk_fn(B_idx=smem_pipe_read.index, wg_wait=-1)
        self.warp_scheduler_barrier_arrive()
        warpgroup.wait_group(0)
        pipeline_k.consumer_release(smem_pipe_read)
        # handle score mods and masking
        if const_expr(score_mod_fn is not None):
            score_mod_fn(acc_S, n_block=n_block, seqlen=seqlen)
        if const_expr(mask_fn is not None):
            mask_fn(acc_S=acc_S, n_block=n_block)
        row_scale = softmax.online_softmax(acc_S, is_first=is_first_n_block, check_inf=check_inf)
        # if cute.arch.thread_idx()[0] == 0: cute.print_tensor(layout_utils.reshape_acc_to_mn(acc_S))
        tOrP_acc = layout_utils.reshape_acc_to_frgA(acc_S)
        tOrP_cur = (
            tOrP if const_expr(self.mma_pv_is_rs) else cute.make_fragment_like(tOrP_acc, self.dtype)
        )
        # tOrP.store(tOrP_acc.load().to(self.dtype))
        # the "to(self.dtype)" conversion fails to vectorize for block sizes other
        # than 128 x 128, i.e. it calls convert on 1 fp32 element at a time instead of
        # 2 elements. So we just call ptx directly.
        utils.cvt_f16(tOrP_acc, tOrP_cur)
        if const_expr(not self.mma_pv_is_rs):
            tPrP = smem_copy_params.smem_thr_copy_P.retile(tOrP_cur)
            cute.copy(smem_copy_params.smem_thr_copy_P, tPrP, smem_copy_params.tPsP)
        # Rescale the running output by the softmax correction factor.
        softmax.rescale_O(acc_O, row_scale)
        if const_expr(not self.mma_pv_is_rs):
            # Fence and barrier to make sure smem store is visible to WGMMA
            cute.arch.fence_view_async_shared()
            cute.arch.sync_warp()  # Only need syncwarp since each warp is using its own P values for MmaPV
        pipeline_v.consumer_wait(smem_pipe_read, pipeline_v.consumer_try_wait(smem_pipe_read))
        self.warp_scheduler_barrier_sync()
        # O += P @ V
        mma_pv_fn(B_idx=smem_pipe_read.index, wg_wait=0)
        pipeline_v.consumer_release(smem_pipe_read)
        smem_pipe_read.advance()
        return smem_pipe_read
    @cute.jit
    def mma_one_n_block_intrawg_overlap(
        self,
        smem_pipe_read: cutlass.pipeline.PipelineState | pipeline.PipelineStateSimple,
        n_block: Int32,
        mma_qk_fn: Callable,
        mma_pv_fn: Callable,
        pipeline_k: cutlass.pipeline.PipelineAsync,
        pipeline_v: cutlass.pipeline.PipelineAsync,
        acc_O: cute.Tensor,
        tOrP: cute.Tensor,
        smem_copy_params: SimpleNamespace,
        softmax: Softmax,
        seqlen: SeqlenInfoQK,
        score_mod_fn: Optional[Callable] = None,
        mask_fn: Optional[Callable] = None,
        check_inf: cutlass.Constexpr = True,
    ):
        """One mainloop step (intra-warpgroup overlap): issue the QK gemm for
        this block and the PV gemm for the *previous* block back-to-back, so
        softmax math overlaps with tensor-core work. Returns the advanced
        pipeline read state (V for this block is consumed on the next call)."""
        # V is read at the pre-advance stage (previous block's V).
        smem_pipe_read_v = smem_pipe_read.clone()
        smem_pipe_read.advance()
        pipeline_k.consumer_wait(smem_pipe_read, pipeline_k.consumer_try_wait(smem_pipe_read))
        self.warp_scheduler_barrier_sync()
        # S = Q @ K.T
        acc_S = mma_qk_fn(B_idx=smem_pipe_read.index, wg_wait=-1)
        pipeline_v.consumer_wait(smem_pipe_read_v, pipeline_v.consumer_try_wait(smem_pipe_read_v))
        # O += P @ V
        mma_pv_fn(B_idx=smem_pipe_read_v.index, wg_wait=-1)
        self.warp_scheduler_barrier_arrive()
        # Wait for the QK gemm only (1 pending group = the PV gemm).
        warpgroup.wait_group(1)
        pipeline_k.consumer_release(smem_pipe_read)
        # handle score mods and masking
        if const_expr(score_mod_fn is not None):
            score_mod_fn(acc_S, n_block=n_block, seqlen=seqlen)
        if const_expr(mask_fn is not None):
            mask_fn(acc_S=acc_S, n_block=n_block)
        # if cute.arch.thread_idx()[0] == 128: cute.print_tensor(layout_utils.reshape_acc_to_mn(acc_S))
        row_scale = softmax.online_softmax(acc_S, check_inf=check_inf)
        # Now wait for the PV gemm before releasing its V stage.
        warpgroup.wait_group(0)
        pipeline_v.consumer_release(smem_pipe_read_v)
        tOrP_acc = layout_utils.reshape_acc_to_frgA(acc_S)
        tOrP_cur = (
            tOrP if const_expr(self.mma_pv_is_rs) else cute.make_fragment_like(tOrP_acc, self.dtype)
        )
        # tOrP_cur.store(tOrP_acc.load().to(self.dtype))
        # the "to(self.dtype)" conversion fails to vectorize for block sizes other
        # than 128 x 128, i.e. it calls convert on 1 fp32 element at a time instead of
        # 2 elements. So we just call ptx directly.
        utils.cvt_f16(tOrP_acc, tOrP_cur)
        if const_expr(not self.mma_pv_is_rs):
            tPrP = smem_copy_params.smem_thr_copy_P.retile(tOrP_cur)
            cute.copy(smem_copy_params.smem_thr_copy_P, tPrP, smem_copy_params.tPsP)
        softmax.rescale_O(acc_O, row_scale)
        if const_expr(not self.mma_pv_is_rs):
            # Fence and barrier to make sure smem store is visible to WGMMA
            cute.arch.fence_view_async_shared()
            cute.arch.sync_warp()  # Only need syncwarp since each warp is using its own P values for MmaPV
        return smem_pipe_read
    @cute.jit
    def mma_init(self):
        """Seed the warp-scheduler named barrier so warp group 1 can enter the
        first warp_scheduler_barrier_sync without deadlocking."""
        warp_group_idx = utils.canonical_warp_group_idx(sync=False)
        if const_expr(self.use_scheduler_barrier):
            if warp_group_idx == 1:
                cute.arch.barrier_arrive(
                    barrier_id=int(NamedBarrierFwd.WarpSchedulerWG1),
                    number_of_threads=2 * self.num_threads_per_warp_group,
                )
    @cute.jit
    def apply_score_mod(
        self,
        thr_mma_qk,
        batch_idx,
        head_idx,
        m_block,
        acc_S,
        n_block,
        softmax_scale,
        seqlen,
        aux_tensors: Optional[list] = None,
        fastdiv_mods=None,
    ):
        """Apply the user score_mod to acc_S in place, giving it global
        (q_idx, k_idx) coordinates for the current (m_block, n_block) tile."""
        # Prepare index tensor
        # Identity tensor offset to global coordinates, then partitioned so each
        # thread sees the (row, col) of the acc_S elements it owns.
        cS = cute.make_identity_tensor((self.tile_m, self.tile_n))
        cS = cute.domain_offset((m_block * self.tile_m, n_block * self.tile_n), cS)
        tScS = thr_mma_qk.partition_C(cS)
        apply_score_mod_inner(
            acc_S,
            tScS,
            self.score_mod,
            batch_idx,
            head_idx,
            softmax_scale,
            self.vec_size,
            self.qk_acc_dtype,
            aux_tensors,
            fastdiv_mods,
            seqlen_info=seqlen,
            constant_q_idx=None,
            qhead_per_kvhead=self.qhead_per_kvhead if const_expr(self.pack_gqa) else 1,
        )
    def warp_scheduler_barrier_sync(self):
        """Wait on this warp group's scheduler barrier (ping-pong scheduling).

        Each warp group waits on its own barrier id
        (WarpSchedulerWG1 - 1 + canonical index); the matching arrive is issued
        by another warp group in warp_scheduler_barrier_arrive (and initially
        by mma_init). No-op when use_scheduler_barrier is off.
        """
        if const_expr(self.use_scheduler_barrier):
            cute.arch.barrier(
                barrier_id=int(NamedBarrierFwd.WarpSchedulerWG1)
                - 1
                + utils.canonical_warp_group_idx(sync=False),
                number_of_threads=2 * self.num_threads_per_warp_group,
            )
    def warp_scheduler_barrier_arrive(self):
        """Signal the *next* warp group's scheduler barrier so it may proceed.

        With 2 MMA warp groups the partner is simply the other group; with 3
        the groups hand off round-robin. No-op when use_scheduler_barrier is
        off.
        """
        if const_expr(self.use_scheduler_barrier):
            assert self.num_mma_warp_groups in [2, 3]
            cur_wg = utils.canonical_warp_group_idx(sync=False) - 1
            if const_expr(self.num_mma_warp_groups == 2):
                next_wg = 1 - cur_wg
            else:
                t = cur_wg + 1
                next_wg = t % self.num_mma_warp_groups
            cute.arch.barrier_arrive(
                barrier_id=int(NamedBarrierFwd.WarpSchedulerWG1) + next_wg,
                number_of_threads=2 * self.num_threads_per_warp_group,
            )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/flash_fwd.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 2283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/interface.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
# [2025-07-04] Version in Cute-DSL, for Hopper and Blackwell. You'll need install nvidia-cutlass-dsl==4.2.0.
# Supported features:
# - BF16 & FP16 dtype
# - noncausal & causal attention
# - MHA, GQA, MQA
# - hdim 64, 96, 128.
# - (hdim_qk, hdim_v) = (192, 128) for Blackwell (i.e. DeepSeek shape)
# - varlen
# - sliding window
# - bwd pass for Ampere (will also run on Hopper/Blackwell, but will be slow)
# Features not supported yet:
# - split (i.e. FlashDecoding)
# - tuned block sizes
# - paged KV
# - append KV to existing KV cache
# - FP8
# - bwd pass optimized for Hopper/Blackwell
import os
import math
from functools import lru_cache
from typing import Optional, Tuple, Callable
import torch
import cuda.bindings.driver as cuda
import cutlass
import cutlass.cute as cute
from flash_attn.cute.cache_utils import get_jit_cache
from flash_attn.cute.testing import is_fake_mode
if os.environ.get("CUTE_DSL_PTXAS_PATH", None) is not None:
from flash_attn.cute import cute_dsl_ptxas # noqa: F401
# Patch to dump ptx and then use system ptxas to compile to cubin
cute_dsl_ptxas.patch()
from flash_attn.cute import utils
from flash_attn.cute.cute_dsl_utils import (
to_cute_tensor, to_cute_aux_tensor, get_aux_tensor_metadata, get_broadcast_dims,
)
from flash_attn.cute.flash_fwd import FlashAttentionForwardSm90
from flash_attn.cute.flash_fwd_sm100 import FlashAttentionForwardSm100
from flash_attn.cute.flash_bwd_preprocess import FlashAttentionBackwardPreprocess
from flash_attn.cute.flash_bwd import FlashAttentionBackwardSm80
from flash_attn.cute.flash_bwd_sm90 import FlashAttentionBackwardSm90
from flash_attn.cute.flash_bwd_sm100 import FlashAttentionBackwardSm100
from flash_attn.cute.flash_bwd_postprocess import FlashAttentionBackwardPostprocess
from flash_attn.cute.flash_fwd_combine import FlashAttentionForwardCombine
from flash_attn.cute.block_sparsity import (
BlockSparseTensorsTorch,
to_cute_block_sparse_tensors,
normalize_block_sparse_config,
normalize_block_sparse_config_bwd,
)
@lru_cache(maxsize=None)
def _get_device_arch():
    """Return the current CUDA device's compute arch as a single int
    (e.g. (9, 0) -> 90), memoized for the life of the process."""
    cap_major, cap_minor = torch.cuda.get_device_capability()
    return 10 * cap_major + cap_minor
def maybe_contiguous(x):
    """Return *x* with a unit stride in its last dimension, copying only when
    needed; ``None`` passes straight through."""
    if x is None or x.stride(-1) == 1:
        return x
    return x.contiguous()
def _validate_tensor(t, name, expected_shape, expected_dtype, expected_device):
assert t.shape == expected_shape, f"{name} shape {t.shape} != expected {expected_shape}"
assert t.dtype == expected_dtype, f"{name} dtype {t.dtype} != expected {expected_dtype}"
assert t.device == expected_device, f"{name} device {t.device} != expected {expected_device}"
assert t.is_cuda, f"{name} must be on CUDA"
# Maps torch dtypes to the corresponding cutlass DSL dtypes used when wrapping
# torch tensors for the cute kernels; only these three input dtypes are supported.
torch2cute_dtype_map = {
    torch.float16: cutlass.Float16,
    torch.bfloat16: cutlass.BFloat16,
    torch.float32: cutlass.Float32,
}
def num_splits_heuristic(total_mblocks, num_SMs, num_n_blocks, max_splits):
    """Pick a split-KV factor that fills the SMs without over-splitting.

    Returns 1 when there are too few KV blocks to be worth splitting (for
    example, hdim 128 with seqlen_k 512 never splits); otherwise returns
    the number of splits needed to occupy all SMs, capped by ``max_splits``
    and by the number of KV blocks available.
    """
    too_few_kv_blocks = num_n_blocks <= 4
    if too_few_kv_blocks:
        return 1
    # NOTE: revisit this heuristic once persistent scheduling supports
    # split KV; sometimes over-scheduling splits is actually more efficient.
    splits_to_fill_sms = num_SMs // total_mblocks
    return min(splits_to_fill_sms, max_splits, num_n_blocks)
def _flash_attn_fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    cu_seqlens_q: Optional[torch.Tensor] = None,
    cu_seqlens_k: Optional[torch.Tensor] = None,
    seqused_q: Optional[torch.Tensor] = None,
    seqused_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None,
    max_seqlen_k: Optional[int] = None,
    page_table: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    causal: bool = False,
    softcap: Optional[float] = None,
    window_size_left: Optional[int] = None,
    window_size_right: Optional[int] = None,
    learnable_sink: Optional[torch.Tensor] = None,
    m_block_size: int = 128,
    n_block_size: int = 128,
    num_threads: int = 384,
    num_splits: int = 1,
    pack_gqa: Optional[bool] = None,
    _arch: Optional[int] = None,
    score_mod: Optional[Callable] = None,
    mask_mod: Optional[Callable] = None,
    block_sparse_tensors: Optional[BlockSparseTensorsTorch] = None,
    return_lse: bool = False,
    out: Optional[torch.Tensor] = None,
    lse: Optional[torch.Tensor] = None,
    aux_tensors: Optional[list[torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Compile (with caching) and launch the CuTe FlashAttention forward kernel.

    Handles fixed-length and varlen (``cu_seqlens_*``/``seqused_*``) batches,
    paged KV caches, GQA packing, causal/sliding-window masking, learnable
    sinks, user ``score_mod``/``mask_mod`` callables, block sparsity and
    split-KV (with a combine pass).

    Args:
        ...
        score_mod: A callable that takes the attention scores and applies a modification.
        mask_mod: A callable that takes token position information and selectively masks
        block_sparse_tensors: A tuple of tensors used for block sparsity.
        return_lse: Whether to return the log softmax of the attention scores. If set to True will always calculate
            Note: the returned LSE currently does not support taking gradient.
        out: Optional pre-allocated output tensor. If None, will be allocated internally.
        lse: Optional pre-allocated log-sum-exp tensor. If None, will be allocated when needed.
        aux_tensors: Some score_mods will want to read from global aux_tensors. This is how we thread them through to the inner kernel.

    Returns:
        (out, lse) — lse is None unless gradients or ``return_lse`` require it.
    """
    q, k, v = [maybe_contiguous(t) for t in (q, k, v)]
    num_head, head_dim = q.shape[-2:]
    if cu_seqlens_q is None:
        batch_size, seqlen_q = q.shape[:2]
        total_q = batch_size * seqlen_q
    else:
        batch_size = cu_seqlens_q.shape[0] - 1
        seqlen_q = None
        total_q = q.shape[0]
    if page_table is not None:
        assert cu_seqlens_k is None, "page_table is not supported with cu_seqlens_k"
        assert page_table.dtype == torch.int32, "page_table must be int32"
        assert page_table.stride(-1) == 1, "page_table must be contiguous in the last dimension"
        max_num_pages_per_seq = page_table.shape[1]
        assert page_table.shape == (batch_size, max_num_pages_per_seq)
        num_pages, page_size = k.shape[:2]
        seqlen_k = num_pages * page_size
    else:
        num_pages, page_size = None, None
        seqlen_k = k.shape[-3]
    num_head_kv = k.shape[-2]
    head_dim_v = v.shape[-1]
    # Shape validation: layout of K/V depends on whether we're varlen / paged.
    if cu_seqlens_k is None:
        if page_table is None:
            assert k.shape == (batch_size, seqlen_k, num_head_kv, head_dim)
            assert v.shape == (batch_size, seqlen_k, num_head_kv, head_dim_v)
        else:
            assert k.shape == (num_pages, page_size, num_head_kv, head_dim)
            assert v.shape == (num_pages, page_size, num_head_kv, head_dim_v)
    else:
        assert k.shape == (seqlen_k, num_head_kv, head_dim)
        assert v.shape == (seqlen_k, num_head_kv, head_dim_v)
        assert cu_seqlens_k.shape == (batch_size + 1,), (
            "cu_seqlens_k must have shape (batch_size + 1,)"
        )
    if cu_seqlens_q is not None:
        assert cu_seqlens_q.shape == (batch_size + 1,), (
            "cu_seqlens_q must have shape (batch_size + 1,)"
        )
    assert seqused_q is None or seqused_q.shape == (batch_size,), (
        "seqused_q must have shape (batch_size,)"
    )
    assert seqused_k is None or seqused_k.shape == (batch_size,), (
        "seqused_k must have shape (batch_size,)"
    )
    assert q.dtype in [torch.float16, torch.bfloat16], "inputs must be float16 or bfloat16"
    assert q.dtype == k.dtype == v.dtype, "inputs must have the same dtype"
    for t in [cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k]:
        if t is not None:
            assert t.dtype == torch.int32, (
                "cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k must be int32"
            )
            assert t.stride(0) == 1, (
                "cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k must be contiguous"
            )
    if learnable_sink is not None:
        assert learnable_sink.shape == (num_head,)
        assert learnable_sink.dtype == torch.bfloat16, "learnable_sink must be bfloat16"
    assert all(
        t is None or t.is_cuda
        for t in (
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            page_table,
            learnable_sink,
        )
    ), "inputs must be on CUDA device"
    assert num_head % num_head_kv == 0, "num_head must be divisible by num_head_kv"
    assert head_dim <= 256, "head_dim must be less than or equal to 256"
    # TMA requires 16-byte alignment on the innermost dimension.
    alignment = 16 // q.element_size()
    assert head_dim % alignment == 0, f"head_dim must be divisible by {alignment}"
    assert head_dim_v % alignment == 0, f"head_dim_v must be divisible by {alignment}"
    if softmax_scale is None:
        softmax_scale = 1.0 / math.sqrt(head_dim)
    if softcap == 0.0:
        softcap = None
    qhead_per_kvhead = num_head // num_head_kv
    if pack_gqa is None:
        pack_gqa = qhead_per_kvhead > 1
    out_torch_dtype = q.dtype
    device = q.device
    q_batch_seqlen_shape = (batch_size, seqlen_q) if cu_seqlens_q is None else (total_q,)
    lse_shape = (batch_size, num_head, seqlen_q) if cu_seqlens_q is None else (num_head, total_q)
    requires_grad = q.requires_grad or k.requires_grad or v.requires_grad
    if out is None:
        out = torch.empty(
            *q_batch_seqlen_shape, num_head, head_dim_v, dtype=out_torch_dtype, device=device
        )
    else:
        _validate_tensor(out, "out", (*q_batch_seqlen_shape, num_head, head_dim_v), out_torch_dtype, device)
    if lse is None:
        # LSE is only materialized when the backward pass or the caller needs it.
        lse = (
            torch.empty(lse_shape, dtype=torch.float32, device=device)
            if requires_grad or return_lse
            else None
        )
    else:
        _validate_tensor(lse, "lse", lse_shape, torch.float32, device)
    dtype = torch2cute_dtype_map[q.dtype]
    arch = _get_device_arch() if _arch is None else _arch
    assert arch // 10 in [9, 10, 11], "Unsupported compute capability. Supported: 9.x, 10.x, 11.x"
    use_block_sparsity = block_sparse_tensors is not None
    # Normalize (causal, local, window) into a canonical form; with a mask_mod
    # the masking is entirely user-defined so we skip this.
    if mask_mod is None:
        if causal:
            window_size_right = 0
        if window_size_left is not None and window_size_right is not None and window_size_left + window_size_right < 0:
            window_size_left = None
            window_size_right = None
        local = window_size_left is not None or window_size_right is not None
        if window_size_left is not None or window_size_right is not None:
            if window_size_left is None and window_size_right == 0:
                causal, local = True, False
                window_size_right = None
            else:
                causal, local = False, True
    else:
        causal, local = False, False
    current_stream = cuda.CUstream(torch.cuda.current_stream().cuda_stream)
    if arch // 10 == 9:  # TODO: tune block size according to hdim.
        if head_dim == head_dim_v == 128 and not causal and not local and not use_block_sparsity:
            n_block_size = 192
    if arch // 10 in [10, 11]:
        # PackGQA on SM100 requires the packed head count to divide the tile.
        if (
            pack_gqa
            and (128 % qhead_per_kvhead != 0)
        ):
            pack_gqa = False
        # TODO: fix GQA + SplitKV + non-varlen
        if pack_gqa and num_splits != 1 and cu_seqlens_q is None:
            pack_gqa = False
    if max_seqlen_q is None:
        max_seqlen_q = seqlen_q if cu_seqlens_q is None else total_q
    if max_seqlen_k is None:
        max_seqlen_k = seqlen_k
    seqlen_q_packgqa = max_seqlen_q * qhead_per_kvhead
    if arch // 10 == 10:
        q_stage = 2 if seqlen_q_packgqa > m_block_size else 1
    else:
        q_stage = 1
    # num_splits < 1 means "auto": pick a split factor heuristically.
    if num_splits < 1:
        m_block_size_effective = q_stage * m_block_size
        # NOTE(review): when local, this assumes both window sizes are set;
        # a one-sided window would make this sum None + int — confirm upstream.
        seqlen_k_loaded = max_seqlen_k if not local else max(0, min(max_seqlen_k, window_size_right + window_size_left + 1 + m_block_size))
        num_n_blocks = (seqlen_k_loaded + n_block_size - 1) // n_block_size
        num_m_blocks = (seqlen_q_packgqa + m_block_size_effective - 1) // m_block_size_effective
        total_mblocks = batch_size * num_head_kv * num_m_blocks
        num_splits = num_splits_heuristic(
            total_mblocks,
            torch.cuda.get_device_properties(device).multi_processor_count,
            num_n_blocks,
            128,
        )
    is_split_kv = num_splits > 1
    if is_split_kv:
        # Partial per-split accumulators, reduced later by the combine kernel.
        out_partial = torch.empty(num_splits, *q_batch_seqlen_shape, num_head, head_dim_v, dtype=torch.float32, device=device)
        lse_partial = torch.empty(num_splits, *lse_shape, dtype=torch.float32, device=device)
    # hash score and mask mods for compile cache
    score_mod_hash = utils.hash_callable(score_mod) if score_mod is not None else False
    mask_mod_hash = utils.hash_callable(mask_mod) if mask_mod is not None else False
    if softcap is not None:
        assert score_mod is None, "softcap and score_mod cannot be used together"
        score_mod = utils.create_softcap_scoremod(softcap)
    is_varlen = (
        cu_seqlens_q is not None
        or cu_seqlens_k is not None
        or seqused_q is not None
        or seqused_k is not None
    )
    if mask_mod is not None:
        if is_varlen:
            raise NotImplementedError(
                "mask_mod with aux_tensors is not yet supported for varlen sequences. This will be fixed in a future PR."
            )
    if use_block_sparsity:
        if is_varlen:
            raise NotImplementedError(
                "Block sparsity is not yet supported for varlen sequences. This will be fixed in a future PR."
            )
        # NB: pack_gqa requires block sparse head dim == 1 (broadcasted)
        if pack_gqa and block_sparse_tensors.mask_block_cnt.shape[1] != 1:
            pack_gqa = False
        if is_split_kv:
            raise NotImplementedError(
                "Block sparsity is not yet supported with SplitKV. TODO: partition sparse block lists per split."
            )
    # See get_broadcast_dims for why this is needed in compile key
    block_sparse_broadcast_pattern = None
    normalized_block_sparse_tensors = None
    q_subtile_factor = None
    if block_sparse_tensors is not None:
        if seqlen_q is None:
            raise ValueError("Block sparsity requires fixed-length sequences (seqlen_q must be known).")
        (
            normalized_block_sparse_tensors,
            block_sparse_broadcast_pattern,
            q_subtile_factor,
        ) = normalize_block_sparse_config(
            block_sparse_tensors,
            batch_size=batch_size,
            num_head=num_head,
            seqlen_q=seqlen_q,
            seqlen_k=seqlen_k,
            block_size=(m_block_size, n_block_size),
            q_stage=q_stage,
        )
    if aux_tensors is not None:
        aux_tensor_metadata = get_aux_tensor_metadata(aux_tensors)
    else:
        aux_tensor_metadata = None
    # Everything that selects a distinct kernel specialization goes in the key.
    compile_key = (
        dtype,
        head_dim,
        head_dim_v,
        qhead_per_kvhead,
        causal,
        score_mod_hash,
        mask_mod_hash,
        use_block_sparsity,
        block_sparse_broadcast_pattern,
        aux_tensor_metadata,
        lse is None,
        cu_seqlens_q is None,
        cu_seqlens_k is None,
        seqused_q is None,
        seqused_k is None,
        page_table is not None,
        window_size_left is not None,
        window_size_right is not None,
        learnable_sink is not None,
        m_block_size,
        n_block_size,
        q_stage,
        num_threads,
        is_split_kv,
        pack_gqa,
        arch,
        page_size not in [None, 128],  # paged KV non-TMA
        q_subtile_factor,
    )
    if compile_key not in _flash_attn_fwd.compile_cache:
        (
            cu_seqlens_q_tensor,
            cu_seqlens_k_tensor,
            seqused_q_tensor,
            seqused_k_tensor,
            learnable_sink_tensor,
        ) = [
            to_cute_tensor(t, assumed_align=4, leading_dim=0)
            if t is not None
            else None
            for t in (cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k, learnable_sink)
        ]
        page_table_tensor = (
            to_cute_tensor(page_table, assumed_align=4, leading_dim=1)
            if page_table is not None
            else None
        )
        q_tensor, k_tensor, v_tensor, o_tensor = [
            to_cute_tensor(t) for t in (q, k, v, out if not is_split_kv else out_partial)
        ]
        if is_split_kv:
            lse_tensor = to_cute_tensor(lse_partial, assumed_align=4)
        elif lse is not None:
            lse_tensor = to_cute_tensor(lse, assumed_align=4)
        else:
            lse_tensor = None
        sparse_tensors = None
        if normalized_block_sparse_tensors is not None:
            sparse_tensors = to_cute_block_sparse_tensors(normalized_block_sparse_tensors)
        cute_aux_tensors = None
        if aux_tensors is not None:
            cute_aux_tensors = [to_cute_aux_tensor(buf) for buf in aux_tensors]
        if arch // 10 == 9:
            assert page_table is None, "paged KV not supported on SM 9.0"
            assert not is_split_kv, "SplitKV not supported on SM 9.0"
            fa_fwd = FlashAttentionForwardSm90(
                dtype,
                head_dim,
                head_dim_v,
                qhead_per_kvhead,
                is_causal=causal,
                is_local=local,
                pack_gqa=pack_gqa,
                tile_m=m_block_size,
                tile_n=n_block_size,
                num_stages=2,
                num_threads=num_threads,
                Q_in_regs=False,
                intra_wg_overlap=True,
                mma_pv_is_rs=True,
                mask_mod=mask_mod,
                score_mod=score_mod,
                has_aux_tensors=aux_tensors is not None,
                q_subtile_factor=q_subtile_factor,
            )
        elif arch // 10 in [10, 11]:
            head_dim_padded = int(math.ceil(head_dim / 16) * 16)
            # Fix: pad head_dim_v from head_dim_v, not head_dim — otherwise the
            # 2-CTA eligibility below is wrong whenever head_dim != head_dim_v.
            head_dim_v_padded = int(math.ceil(head_dim_v / 16) * 16)
            use_2cta_instrs = (
                not causal
                and not local
                and not is_split_kv
                and cu_seqlens_q is None
                and seqused_q is None
                and not use_block_sparsity
                and page_size in [None, 128]
                and head_dim_padded == 128
                and head_dim_v_padded == 128
            )
            fa_fwd = FlashAttentionForwardSm100(
                head_dim,
                head_dim_v,
                qhead_per_kvhead=qhead_per_kvhead,
                is_causal=causal,
                is_local=local,
                is_split_kv=is_split_kv,
                pack_gqa=pack_gqa,
                m_block_size=m_block_size,
                n_block_size=n_block_size,
                q_stage=q_stage,
                is_persistent=not causal
                and not local
                and cu_seqlens_q is None
                and seqused_q is None
                and not is_split_kv,
                score_mod=score_mod,
                mask_mod=mask_mod,
                has_aux_tensors=aux_tensors is not None,
                paged_kv_non_tma=page_size not in [None, 128],
                is_varlen_q=cu_seqlens_q is not None or seqused_q is not None,
                q_subtile_factor=q_subtile_factor,
                use_2cta_instrs=use_2cta_instrs,
            )
        else:
            raise ValueError(
                f"Unsupported compute capability: {arch}. Supported: 9.x, 10.x, 11.x"
            )
        # TODO: check @can_implement
        _flash_attn_fwd.compile_cache[compile_key] = cute.compile(
            fa_fwd,
            q_tensor,
            k_tensor,
            v_tensor,
            o_tensor,
            lse_tensor,
            softmax_scale,
            current_stream,
            cu_seqlens_q_tensor,
            cu_seqlens_k_tensor,
            seqused_q_tensor,
            seqused_k_tensor,
            page_table_tensor,
            window_size_left,
            window_size_right,
            learnable_sink_tensor,
            sparse_tensors,
            cute_aux_tensors,
            options="--enable-tvm-ffi",
        )
    # In "fake mode", we will take torch fake tensors as input and the expected behaviors are:
    # - Use those fake metadata to populate compilation cache
    # - Return "fake" output tensors, which could be needed in follow-up fake operations
    # Thus, we skip the actual kernel invocation here.
    if not is_fake_mode():
        _flash_attn_fwd.compile_cache[compile_key](
            q.detach(),
            k.detach(),
            v.detach(),
            out.detach() if not is_split_kv else out_partial,
            lse_partial if is_split_kv else lse,
            softmax_scale,
            current_stream,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            page_table,
            window_size_left,
            window_size_right,
            learnable_sink,
            normalized_block_sparse_tensors[:4] if normalized_block_sparse_tensors is not None else None,
            aux_tensors,
        )
        if is_split_kv:
            # Reduce the per-split partial outputs/LSEs into the final tensors.
            _flash_attn_fwd_combine(
                out_partial,
                lse_partial.transpose(-1, -2),
                out,
                lse.transpose(-1, -2) if lse is not None else None,
                cu_seqlens_q,
                seqused_q,
            )
    return out, lse
_flash_attn_fwd.compile_cache = get_jit_cache("fwd")
def _flash_attn_bwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    dout: torch.Tensor,
    lse: torch.Tensor,
    softmax_scale: Optional[float] = None,
    causal: bool = False,
    softcap: float = 0.0,
    window_size_left: Optional[int] = None,
    window_size_right: Optional[int] = None,
    m_block_size: int = 64,
    n_block_size: int = 128,
    num_threads: int = 256,
    pack_gqa: bool = False,
    num_stages_Q: int = 2,
    num_stages_dO: int = 2,
    SdP_swapAB: bool = False,
    dKV_swapAB: bool = False,
    dQ_swapAB: bool = False,
    AtomLayoutMSdP: int = 2,
    AtomLayoutNdKV: int = 2,
    AtomLayoutMdQ: int = 2,
    V_in_regs: bool = False,
    cu_seqlens_q: Optional[torch.Tensor] = None,
    cu_seqlens_k: Optional[torch.Tensor] = None,
    seqused_q: Optional[torch.Tensor] = None,
    seqused_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None,
    max_seqlen_k: Optional[int] = None,
    deterministic: bool = False,
    dq: Optional[torch.Tensor] = None,
    dk: Optional[torch.Tensor] = None,
    dv: Optional[torch.Tensor] = None,
    score_mod: Optional[Callable] = None,
    score_mod_bwd: Optional[Callable] = None,
    mask_mod: Optional[Callable] = None,
    aux_tensors: Optional[list[torch.Tensor]] = None,
    block_sparse_tensors: Optional[BlockSparseTensorsTorch] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Compile (with caching) and launch the CuTe FlashAttention backward pass.

    Runs three kernels: a preprocess (dO*O row sums, lse*log2e, zeroing of
    dq_accum), the main backward (dk, dv, dq_accum in fp32), and postprocess
    kernels that downcast dq_accum (and, for GQA, dk/dv accumulators) to the
    input dtype.

    Returns:
        (dq, dk, dv) with the same shapes/dtypes as (q, k, v).
    """
    arch = _get_device_arch()
    assert arch // 10 in [9, 10, 11], "Unsupported compute capability. Supported: 9.x, 10.x, 11.x"
    num_head, head_dim = q.shape[-2:]
    # Normalize (causal, local, window) into a canonical form.
    if causal:
        window_size_right = 0
    if window_size_left is not None and window_size_right is not None and window_size_left + window_size_right < 0:
        window_size_left = None
        window_size_right = None
    local = window_size_left is not None or window_size_right is not None
    if local:
        if window_size_left is None and window_size_right == 0:
            causal, local = True, False
            window_size_right = None
        else:
            causal, local = False, True
    if arch // 10 == 9:
        # Hand-tuned SM90 configuration; overrides the keyword defaults.
        m_block_size = 80 if not causal else 64
        n_block_size = 128
        num_stages_Q = 2
        num_stages_dO = 2
        num_stages_PdS = 2
        SdP_swapAB = True
        dKV_swapAB = False
        dQ_swapAB = not causal
        AtomLayoutMSdP = 1
        AtomLayoutNdKV = 2
        AtomLayoutMdQ = 1
        cluster_size = 1
        use_2cta_instrs = False
        assert window_size_left is None and window_size_right is None, "local not supported yet on 9.x"
        is_varlen = (
            cu_seqlens_q is not None
            or cu_seqlens_k is not None
            or seqused_q is not None
            or seqused_k is not None
        )
        assert not is_varlen, "varlen backward is not yet supported on sm90"
    else:
        m_block_size = 128
        n_block_size = 128
        dQ_swapAB = False
        dKV_swapAB = False
        AtomLayoutMdQ = 1
        AtomLayoutNdKV = 1
        disable_2cta = (
            local
            or score_mod is not None
            or score_mod_bwd is not None
            or mask_mod is not None
        )
        cluster_size = 2 if head_dim >= 128 and not disable_2cta else 1
        use_2cta_instrs = cluster_size == 2
    q, k, v, out, dout, lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k = [
        maybe_contiguous(t)
        for t in (q, k, v, out, dout, lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
    ]
    if cu_seqlens_q is None:
        batch_size, seqlen_q = q.shape[:2]
        total_q = batch_size * seqlen_q
    else:
        batch_size = cu_seqlens_q.shape[0] - 1
        total_q = q.shape[0]
        seqlen_q = max_seqlen_q if max_seqlen_q is not None else total_q
    if cu_seqlens_k is None:
        batch_size, seqlen_k = k.shape[:2]
        total_k = batch_size * seqlen_k
    else:
        batch_size = cu_seqlens_k.shape[0] - 1
        total_k = k.shape[0]
        seqlen_k = max_seqlen_k if max_seqlen_k is not None else total_k
    num_head_kv = k.shape[-2]
    head_dim_v = v.shape[-1]
    use_block_sparsity = block_sparse_tensors is not None
    # Q-subtiling factor passed to the main backward kernels and to the
    # block-sparse normalizer. NOTE(review): assumed no-subtiling default of 1
    # (only the SM90 block-sparse path overrides it below); previously this
    # was unbound on every other path, raising NameError on a cache miss —
    # confirm the default against the kernel constructors.
    subtile_factor = 1
    # SM90 block-sparse backward: tile_m=64 is the GCD between a m_block_size that fits,
    # the base block_m of 128 from forward, and block-sparse size for subtiling.
    if arch // 10 == 9 and use_block_sparsity:
        m_block_size = 64
        # dQ_swapAB tuning: use False when m_block_size=64 (same as causal case)
        dQ_swapAB = False
        # NB: this could be derived from the block_sparse_tensors but for now we hardcode it to 2
        subtile_factor = 2
    seqlen_q_rounded = (seqlen_q + m_block_size - 1) // m_block_size * m_block_size
    seqlen_k_rounded = (seqlen_k + n_block_size - 1) // n_block_size * n_block_size
    num_n_blocks = seqlen_k_rounded // n_block_size
    # With a 2-CTA cluster the K-block count must be even; pad by one tile.
    if cluster_size == 2 and num_n_blocks % cluster_size != 0:
        seqlen_k_rounded = seqlen_k_rounded + n_block_size
    if cu_seqlens_k is None:
        assert k.shape == (batch_size, seqlen_k, num_head_kv, head_dim)
        assert v.shape == (batch_size, seqlen_k, num_head_kv, head_dim_v)
    else:
        assert k.shape == (total_k, num_head_kv, head_dim)
        assert v.shape == (total_k, num_head_kv, head_dim_v)
        assert cu_seqlens_k.shape == (batch_size + 1,), (
            "cu_seqlens_k must have shape (batch_size + 1,)"
        )
    if cu_seqlens_q is not None:
        assert cu_seqlens_q.shape == (batch_size + 1,), (
            "cu_seqlens_q must have shape (batch_size + 1,)"
        )
        assert out.shape == (total_q, num_head, head_dim_v)
        assert dout.shape == (total_q, num_head, head_dim_v)
        assert lse.shape == (num_head, total_q), "lse must have shape (num_head, total_q)"
    else:
        assert out.shape == (batch_size, seqlen_q, num_head, head_dim_v)
        assert dout.shape == (batch_size, seqlen_q, num_head, head_dim_v)
        assert lse.shape == (batch_size, num_head, seqlen_q), (
            "lse must have shape (batch_size, num_head, seqlen_q)"
        )
    assert q.dtype in [torch.float16, torch.bfloat16], "inputs must be float16 or bfloat16"
    assert q.dtype == k.dtype == v.dtype == out.dtype == dout.dtype, (
        "inputs must have the same dtype"
    )
    for t in [cu_seqlens_q, cu_seqlens_k]:
        if t is not None:
            assert t.dtype == torch.int32, "cu_seqlens_q, cu_seqlens_k must be int32"
    assert lse.dtype == torch.float32, "lse must be float32"
    assert all(
        t is None or t.is_cuda for t in (q, k, v, out, dout, lse, cu_seqlens_q, cu_seqlens_k)
    ), "inputs must be on CUDA device"
    assert num_head % num_head_kv == 0, "num_head must be divisible by num_head_kv"
    assert head_dim <= 256, "head_dim must be less than or equal to 256"
    # TMA requires 16-byte alignment on the innermost dimension.
    alignment = 16 // q.element_size()
    assert head_dim % alignment == 0, f"head_dim must be divisible by {alignment}"
    assert head_dim_v % alignment == 0, f"head_dim_v must be divisible by {alignment}"
    if softmax_scale is None:
        softmax_scale = 1.0 / math.sqrt(head_dim)
    qhead_per_kvhead = num_head // num_head_kv
    if pack_gqa is None:
        pack_gqa = qhead_per_kvhead > 1
    # pack_gqa backward not yet supported in bwd
    pack_gqa = False
    if arch // 10 not in [10, 11]:
        assert deterministic is False, "bwd deterministic only supported for sm100/sm110 for now"
    if score_mod is not None:
        assert score_mod_bwd is not None, "score_mod_bwd is required when score_mod is provided"
        assert softcap == 0.0, "softcap and score_mod are mutually exclusive (different log2 scaling)"
        assert cu_seqlens_q is None and cu_seqlens_k is None, (
            "varlen + score_mod not supported in bwd yet"
        )
    device = q.device
    out_torch_dtype = q.dtype
    if dq is None:
        dq = torch.empty_like(q)
    else:
        _validate_tensor(dq, "dq", q.shape, out_torch_dtype, device)
    if dk is None:
        dk = torch.empty_like(k)
    else:
        _validate_tensor(dk, "dk", k.shape, out_torch_dtype, device)
    if dv is None:
        dv = torch.empty_like(v)
    else:
        _validate_tensor(dv, "dv", v.shape, out_torch_dtype, device)
    # fp32 accumulators + per-row scratch for the main backward kernel.
    head_dim_rounded = (head_dim + 32 - 1) // 32 * 32
    if cu_seqlens_q is None:
        dq_accum = torch.empty(
            batch_size,
            num_head,
            seqlen_q_rounded * head_dim_rounded,
            dtype=torch.float32,
            device=device,
        )
        dpsum = torch.empty(
            batch_size, num_head, seqlen_q_rounded, dtype=torch.float32, device=device
        )
        lse_log2 = torch.empty(
            batch_size, num_head, seqlen_q_rounded, dtype=torch.float32, device=device
        )
    else:
        total_q_rounded_padded = (
            (total_q + cu_seqlens_q.shape[0] * m_block_size - 1) // m_block_size * m_block_size
        )
        dq_accum = torch.empty(
            num_head, total_q_rounded_padded * head_dim_rounded, dtype=torch.float32, device=device
        )
        dpsum = torch.empty(num_head, total_q_rounded_padded, dtype=torch.float32, device=device)
        lse_log2 = torch.empty(num_head, total_q_rounded_padded, dtype=torch.float32, device=device)
    # GQA: dK/dV are accumulated in fp32 and downcast in a postprocess pass.
    dKV_postprocess = qhead_per_kvhead > 1
    if dKV_postprocess:
        head_dim_v_rounded = (head_dim_v + 32 - 1) // 32 * 32
        if cu_seqlens_k is None:
            dk_accum = torch.zeros(
                batch_size,
                num_head_kv,
                seqlen_k_rounded * head_dim_rounded,
                dtype=torch.float32,
                device=device,
            )
            dv_accum = torch.zeros(
                batch_size,
                num_head_kv,
                seqlen_k_rounded * head_dim_v_rounded,
                dtype=torch.float32,
                device=device,
            )
        else:
            cluster_tile_n = cluster_size * n_block_size
            total_k_rounded_padded = (
                (total_k + cu_seqlens_k.shape[0] * cluster_tile_n - 1) // cluster_tile_n * cluster_tile_n
            )
            dk_accum = torch.zeros(
                num_head_kv,
                total_k_rounded_padded * head_dim_rounded,
                dtype=torch.float32,
                device=device,
            )
            dv_accum = torch.zeros(
                num_head_kv,
                total_k_rounded_padded * head_dim_v_rounded,
                dtype=torch.float32,
                device=device,
            )
    dtype = torch2cute_dtype_map[q.dtype]
    current_stream = cuda.CUstream(torch.cuda.current_stream().cuda_stream)
    # Semaphores serialize the dQ (and, for GQA, dK/dV) accumulation so the
    # reduction order — and thus the result — is deterministic. Allocate on
    # q's device rather than the ambient current device.
    if deterministic:
        dQ_semaphore = torch.zeros(batch_size, num_head, seqlen_q_rounded // m_block_size, cluster_size, dtype=torch.int32, device=device)
    else:
        dQ_semaphore = None
    if deterministic and qhead_per_kvhead > 1:
        dK_semaphore = torch.zeros(batch_size, num_head_kv, seqlen_k_rounded // n_block_size, 2, dtype=torch.int32, device=device)
        dV_semaphore = torch.zeros(batch_size, num_head_kv, seqlen_k_rounded // n_block_size, 2, dtype=torch.int32, device=device)
    else:
        dK_semaphore = None
        dV_semaphore = None
    # Preprocess kernel: compute (o * dout).sum(dim=-1), lse * log2_e, and zero out dq_accum.
    compile_key_pre = (
        arch,
        dtype,
        head_dim,
        head_dim_v,
        m_block_size,
        num_threads,
        cu_seqlens_q is None,
        seqused_q is None,
        get_broadcast_dims(out),
        get_broadcast_dims(dout),
    )
    if compile_key_pre not in _flash_attn_bwd.compile_cache_pre:
        o_tensor, do_tensor = [to_cute_tensor(t) for t in (out, dout)]
        dq_accum_tensor, dpsum_tensor, lse_log2_tensor = [
            to_cute_tensor(t) for t in (dq_accum, dpsum, lse_log2)
        ]
        lse_tensor = to_cute_tensor(lse, assumed_align=4)
        cu_seqlens_q_tensor, seqused_q_tensor = [
            to_cute_tensor(t, assumed_align=4) if t is not None else None
            for t in (cu_seqlens_q, seqused_q)
        ]
        fa_bwd_pre = FlashAttentionBackwardPreprocess(
            dtype,
            head_dim,
            head_dim_v,
            arch,
            m_block_size,
            num_threads=num_threads,
        )
        # TODO: check @can_implement
        _flash_attn_bwd.compile_cache_pre[compile_key_pre] = cute.compile(
            fa_bwd_pre,
            o_tensor,
            do_tensor,
            dpsum_tensor,
            lse_tensor,
            lse_log2_tensor,
            dq_accum_tensor,
            cu_seqlens_q_tensor,
            seqused_q_tensor,
            current_stream,
            options="--enable-tvm-ffi",
        )
    if not is_fake_mode():
        _flash_attn_bwd.compile_cache_pre[compile_key_pre](
            out,
            dout,
            dpsum,
            lse,
            lse_log2,
            dq_accum,
            cu_seqlens_q,
            seqused_q,
            current_stream,
        )
    # NB num_threads application for 3 kernels
    # There are pre, main, post processing kernels, currently num_threads is only actually
    # used for the pre proc, and then we hard code to 384 for the main and post proc, and we do
    # before cache key gen
    num_threads = 384
    # Backward kernel: compute dk, dv, dq_accum.
    score_mod_hash = utils.hash_callable(score_mod) if score_mod else False
    score_mod_bwd_hash = utils.hash_callable(score_mod_bwd) if score_mod_bwd else False
    mask_mod_hash = utils.hash_callable(mask_mod) if mask_mod else False
    num_aux_tensors = len(aux_tensors) if aux_tensors else 0
    cute_aux_tensors = None
    if aux_tensors is not None:
        cute_aux_tensors = [to_cute_tensor(buf, assumed_align=None, fully_dynamic=True) for buf in aux_tensors]
    block_sparse_broadcast_pattern = None
    normalized_block_sparse_tensors = None
    if block_sparse_tensors is not None:
        (
            normalized_block_sparse_tensors,
            block_sparse_broadcast_pattern,
        ) = normalize_block_sparse_config_bwd(
            block_sparse_tensors,
            batch_size=batch_size,
            num_head=num_head,
            seqlen_q=seqlen_q,
            seqlen_k=seqlen_k,
            block_size=(m_block_size, n_block_size),
            subtile_factor=subtile_factor,
        )
    if arch // 10 == 9:
        compile_key = (
            arch,
            dtype,
            head_dim,
            head_dim_v,
            qhead_per_kvhead,
            causal,
            softcap != 0.0,
            m_block_size,
            n_block_size,
            num_threads,
            pack_gqa,
            num_stages_Q,
            num_stages_dO,
            SdP_swapAB,
            dKV_swapAB,
            dQ_swapAB,
            AtomLayoutMSdP,
            AtomLayoutNdKV,
            AtomLayoutMdQ,
            V_in_regs,
            cu_seqlens_q is None,
            cu_seqlens_k is None,
            seqused_q is None,
            seqused_k is None,
            score_mod_hash,
            score_mod_bwd_hash,
            mask_mod_hash,
            num_aux_tensors,
            use_block_sparsity,
            block_sparse_broadcast_pattern,
            get_broadcast_dims(q),
            get_broadcast_dims(k),
            get_broadcast_dims(v),
            get_broadcast_dims(dout),
        )
    else:
        compile_key = (
            arch,
            dtype,
            head_dim,
            head_dim_v,
            qhead_per_kvhead,
            causal,
            window_size_left is not None,
            window_size_right is not None,
            softcap != 0.0,
            m_block_size,
            n_block_size,
            num_threads,
            pack_gqa,
            cluster_size,
            use_2cta_instrs,
            deterministic,
            score_mod_hash,
            score_mod_bwd_hash,
            mask_mod_hash,
            num_aux_tensors,
            use_block_sparsity,
            block_sparse_broadcast_pattern,
            cu_seqlens_q is None,
            cu_seqlens_k is None,
            seqused_q is None,
            seqused_k is None,
            get_broadcast_dims(q),
            get_broadcast_dims(k),
            get_broadcast_dims(v),
            get_broadcast_dims(dout),
        )
    if compile_key not in _flash_attn_bwd.compile_cache:
        q_tensor, k_tensor, v_tensor, do_tensor, dq_tensor, dk_tensor, dv_tensor = [
            to_cute_tensor(t) for t in (q, k, v, dout, dq, dk, dv)
        ]
        dq_accum_tensor, dpsum_tensor, lse_log2_tensor = [
            to_cute_tensor(t) for t in (dq_accum, dpsum, lse_log2)
        ]
        if dKV_postprocess:
            dk_accum_tensor, dv_accum_tensor = [
                to_cute_tensor(t) for t in (dk_accum, dv_accum)
            ]
        cu_seqlens_q_tensor, cu_seqlens_k_tensor, seqused_q_tensor, seqused_k_tensor = [
            to_cute_tensor(t, assumed_align=4) if t is not None else None
            for t in (cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
        ]
        dQ_semaphore_tensor, dK_semaphore_tensor, dV_semaphore_tensor = [
            utils.convert_from_dlpack_leading_static(t.detach(), leading_dim=3, alignment=4, stride_order=t.dim_order())
            if t is not None else None
            for t in (dQ_semaphore, dK_semaphore, dV_semaphore)
        ]
        # NOTE(review): fa_bwd_sm80 appears unused here (arch is asserted to be
        # 9/10/11 above) — candidate for removal; kept pending confirmation
        # that its construction has no side effects relied upon elsewhere.
        fa_bwd_sm80 = FlashAttentionBackwardSm80(
            dtype,
            head_dim,
            head_dim_v,
            qhead_per_kvhead,
            m_block_size,
            n_block_size,
            num_stages_Q,
            num_stages_dO,
            num_threads,
            pack_gqa,
            causal,
            SdP_swapAB,
            dKV_swapAB,
            dQ_swapAB,
            AtomLayoutMSdP,
            AtomLayoutNdKV,
            AtomLayoutMdQ,
            V_in_regs=V_in_regs,
        )
        if arch // 10 == 9:
            fa_bwd_obj = FlashAttentionBackwardSm90(
                dtype,
                head_dim,
                head_dim_v,
                qhead_per_kvhead,
                causal,
                m_block_size,
                n_block_size,
                num_stages_Q,
                num_stages_dO,
                num_stages_PdS,
                SdP_swapAB,
                dKV_swapAB,
                dQ_swapAB,
                AtomLayoutMSdP,
                AtomLayoutNdKV,
                AtomLayoutMdQ,
                num_threads,
                V_in_regs=V_in_regs,
                score_mod=score_mod,
                score_mod_bwd=score_mod_bwd,
                mask_mod=mask_mod,
                has_aux_tensors=aux_tensors is not None,
                subtile_factor=subtile_factor,
            )
        else:
            fa_bwd_obj = FlashAttentionBackwardSm100(
                head_dim,
                head_dim_v,
                is_causal=causal,
                is_local=local,
                qhead_per_kvhead=qhead_per_kvhead,
                tile_m=m_block_size,
                tile_n=n_block_size,
                cluster_size=cluster_size,
                use_2cta_instrs=use_2cta_instrs,
                deterministic=deterministic,
                score_mod=score_mod,
                score_mod_bwd=score_mod_bwd,
                mask_mod=mask_mod,
                has_aux_tensors=aux_tensors is not None,
                subtile_factor=subtile_factor,
            )
        # Block sparse tensors for backward use Q-direction indexing (transposed from forward).
        sparse_tensors_compile = None
        if normalized_block_sparse_tensors is not None:
            sparse_tensors_compile = to_cute_block_sparse_tensors(normalized_block_sparse_tensors)
        # TODO: check @can_implement
        _flash_attn_bwd.compile_cache[compile_key] = cute.compile(
            fa_bwd_obj,
            q_tensor,
            k_tensor,
            v_tensor,
            do_tensor,
            lse_log2_tensor,
            dpsum_tensor,
            dq_accum_tensor,
            dk_tensor if not dKV_postprocess else dk_accum_tensor,
            dv_tensor if not dKV_postprocess else dv_accum_tensor,
            softmax_scale,
            current_stream,
            cu_seqlens_q_tensor,
            cu_seqlens_k_tensor,
            seqused_q_tensor,
            seqused_k_tensor,
            None,  # softcap - not yet supported in backward
            window_size_left,
            window_size_right,
            dQ_semaphore_tensor,
            dK_semaphore_tensor,
            dV_semaphore_tensor,
            cute_aux_tensors,
            sparse_tensors_compile,
            options="--enable-tvm-ffi",
        )
    if not is_fake_mode():
        _flash_attn_bwd.compile_cache[compile_key](
            q.detach(),
            k.detach(),
            v.detach(),
            dout,
            lse_log2,
            dpsum,
            dq_accum,
            dk if not dKV_postprocess else dk_accum,
            dv if not dKV_postprocess else dv_accum,
            softmax_scale,
            current_stream,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            None,  # softcap - not yet supported in backward
            window_size_left,
            window_size_right,
            dQ_semaphore,
            dK_semaphore,
            dV_semaphore,
            aux_tensors,
            normalized_block_sparse_tensors[:4] if normalized_block_sparse_tensors is not None else None,
        )
    num_threads = 256 if arch // 10 == 9 else 128
    # Postprocess kernel: convert dq_accum from float32 to dq in bf16/fp16
    compile_key_post = (
        arch,
        dtype,
        head_dim,
        m_block_size,
        num_threads,
        AtomLayoutMdQ,
        dQ_swapAB,
        cu_seqlens_q is None,
        seqused_q is None,
        use_2cta_instrs,
        1,  # no cluster for tile_m
        get_broadcast_dims(dq_accum),
        get_broadcast_dims(dq),
    )
    if compile_key_post not in _flash_attn_bwd.compile_cache_post:
        dq_accum_tensor = to_cute_tensor(dq_accum)
        dq_tensor = to_cute_tensor(dq)
        cu_seqlens_q_tensor, seqused_q_tensor = [
            to_cute_tensor(t, assumed_align=4) if t is not None else None
            for t in (cu_seqlens_q, seqused_q)
        ]
        fa_bwd_post = FlashAttentionBackwardPostprocess(
            dtype, head_dim, arch, m_block_size, num_threads, AtomLayoutMdQ, dQ_swapAB,
            use_2cta_instrs=use_2cta_instrs,
        )
        # TODO: check @can_implement
        _flash_attn_bwd.compile_cache_post[compile_key_post] = cute.compile(
            fa_bwd_post,
            dq_accum_tensor,
            dq_tensor,
            softmax_scale,
            cu_seqlens_q_tensor,
            seqused_q_tensor,
            current_stream,
            options="--enable-tvm-ffi",
        )
    if not is_fake_mode():
        _flash_attn_bwd.compile_cache_post[compile_key_post](
            dq_accum,
            dq,
            softmax_scale,
            cu_seqlens_q,
            seqused_q,
            current_stream,
        )
    if dKV_postprocess:
        # Postprocess kernel: convert dk_accum & dv_accum from float32 to bf16/fp16
        compile_key_post = (
            arch,
            dtype,
            head_dim,
            n_block_size,
            num_threads,
            AtomLayoutNdKV,
            dKV_swapAB,
            cu_seqlens_k is None,
            seqused_k is None,
            False,  # even for 2cta, is split along hdim, so always False
            cluster_size,  # cluster is for tile_n
            get_broadcast_dims(dk_accum),
            get_broadcast_dims(dk),
        )
        if compile_key_post not in _flash_attn_bwd.compile_cache_post:
            dk_accum_tensor = to_cute_tensor(dk_accum)
            dk_tensor = to_cute_tensor(dk)
            cu_seqlens_k_tensor, seqused_k_tensor = [
                to_cute_tensor(t, assumed_align=4) if t is not None else None
                for t in (cu_seqlens_k, seqused_k)
            ]
            fa_bwd_post = FlashAttentionBackwardPostprocess(
                dtype, head_dim, arch, n_block_size, num_threads, AtomLayoutNdKV, dKV_swapAB,
                cluster_size=cluster_size,
            )
            # TODO: check @can_implement
            _flash_attn_bwd.compile_cache_post[compile_key_post] = cute.compile(
                fa_bwd_post,
                dk_accum_tensor,
                dk_tensor,
                softmax_scale,
                cu_seqlens_k_tensor,
                seqused_k_tensor,
                current_stream,
                options="--enable-tvm-ffi",
            )
        if not is_fake_mode():
            _flash_attn_bwd.compile_cache_post[compile_key_post](
                dk_accum,
                dk,
                softmax_scale,
                cu_seqlens_k,
                seqused_k,
                current_stream,
            )
        compile_key_post = (
            arch,
            dtype,
            head_dim_v,
            n_block_size,
            num_threads,
            AtomLayoutNdKV,
            dKV_swapAB,
            cu_seqlens_k is None,
            seqused_k is None,
            False,
            cluster_size,
            get_broadcast_dims(dv_accum),
            get_broadcast_dims(dv),
        )
        if compile_key_post not in _flash_attn_bwd.compile_cache_post:
            dv_accum_tensor = to_cute_tensor(dv_accum)
            dv_tensor = to_cute_tensor(dv)
            cu_seqlens_k_tensor, seqused_k_tensor = [
                to_cute_tensor(t, assumed_align=4) if t is not None else None
                for t in (cu_seqlens_k, seqused_k)
            ]
            fa_bwd_post = FlashAttentionBackwardPostprocess(
                dtype, head_dim_v, arch, n_block_size, num_threads, AtomLayoutNdKV, dKV_swapAB,
                cluster_size=cluster_size,
            )
            # TODO: check @can_implement
            _flash_attn_bwd.compile_cache_post[compile_key_post] = cute.compile(
                fa_bwd_post,
                dv_accum_tensor,
                dv_tensor,
                cutlass.Float32(1.0),  # dV is not scaled by softmax_scale
                cu_seqlens_k_tensor,
                seqused_k_tensor,
                current_stream,
                options="--enable-tvm-ffi",
            )
        if not is_fake_mode():
            _flash_attn_bwd.compile_cache_post[compile_key_post](
                dv_accum,
                dv,
                1.0,
                cu_seqlens_k,
                seqused_k,
                current_stream,
            )
    return dq, dk, dv
_flash_attn_bwd.compile_cache_pre = get_jit_cache("bwd_pre")
_flash_attn_bwd.compile_cache = get_jit_cache("bwd")
_flash_attn_bwd.compile_cache_post = get_jit_cache("bwd_post")
class FlashAttnFunc(torch.autograd.Function):
    """Autograd wrapper around the CuTe flash-attention kernels for
    fixed-length (batched) q/k/v.

    ``forward`` dispatches to ``_flash_attn_fwd`` (optionally with
    block-sparse metadata and a FlexAttention-style ``mask_mod``);
    ``backward`` dispatches to ``_flash_attn_bwd`` for dq/dk/dv.
    The LSE output is marked non-differentiable (its gradient is not
    supported yet).
    """

    @staticmethod
    def forward(
        ctx,
        q: torch.Tensor,
        k: torch.Tensor,
        v: torch.Tensor,
        softmax_scale: Optional[float] = None,
        causal: bool = False,
        window_size: Tuple[Optional[int], Optional[int]] = (None, None),
        learnable_sink: Optional[torch.Tensor] = None,
        softcap: float = 0.0,
        num_splits: int = 1,
        pack_gqa: Optional[bool] = None,
        deterministic: bool = False,
        mask_mod: Optional[Callable] = None,
        full_block_cnt: Optional[torch.Tensor] = None,
        full_block_idx: Optional[torch.Tensor] = None,
        mask_block_cnt: Optional[torch.Tensor] = None,
        mask_block_idx: Optional[torch.Tensor] = None,
        block_size: Optional[Tuple[int, int]] = None,
        return_lse: bool = False,
    ):
        # Wrap the block-sparse metadata only when the caller supplied at
        # least one of the four count/index tensors; otherwise pass None.
        sparse_parts = (full_block_cnt, full_block_idx, mask_block_cnt, mask_block_idx)
        block_sparse_tensors = (
            BlockSparseTensorsTorch(
                full_block_cnt=full_block_cnt,
                full_block_idx=full_block_idx,
                mask_block_cnt=mask_block_cnt,
                mask_block_idx=mask_block_idx,
                block_size=block_size,
            )
            if any(t is not None for t in sparse_parts)
            else None
        )
        window_left, window_right = window_size
        out, lse = _flash_attn_fwd(
            q,
            k,
            v,
            softmax_scale=softmax_scale,
            causal=causal,
            window_size_left=window_left,
            window_size_right=window_right,
            learnable_sink=learnable_sink,
            softcap=softcap,
            num_splits=num_splits,
            pack_gqa=pack_gqa,
            mask_mod=mask_mod,
            block_sparse_tensors=block_sparse_tensors,
            return_lse=return_lse,
        )
        ctx.save_for_backward(q, k, v, out, lse)
        # Non-tensor state needed by backward.
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.window_size = window_size
        ctx.softcap = softcap
        ctx.deterministic = deterministic
        if lse is not None:
            # LSE gradient is not supported yet.
            ctx.mark_non_differentiable(lse)
        return out, lse

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, lse = ctx.saved_tensors
        window_left, window_right = ctx.window_size
        dq, dk, dv = _flash_attn_bwd(
            q,
            k,
            v,
            out,
            dout,
            lse,
            ctx.softmax_scale,
            ctx.causal,
            ctx.softcap,
            window_size_left=window_left,
            window_size_right=window_right,
            deterministic=ctx.deterministic,
        )
        # One None per remaining forward argument (extra Nones are fine).
        return (dq, dk, dv) + (None,) * 20
class FlashAttnVarlenFunc(torch.autograd.Function):
    """Autograd wrapper around the CuTe flash-attention kernels for
    variable-length (cu_seqlens-packed) q/k/v.

    ``forward`` dispatches to ``_flash_attn_fwd`` with the varlen metadata
    (cu_seqlens / seqused / max_seqlen / page_table); ``backward``
    dispatches to ``_flash_attn_bwd``. The LSE output is marked
    non-differentiable (its gradient is not supported yet).
    """

    @staticmethod
    def forward(
        ctx,
        q: torch.Tensor,
        k: torch.Tensor,
        v: torch.Tensor,
        cu_seqlens_q: Optional[torch.Tensor],
        cu_seqlens_k: Optional[torch.Tensor],
        seqused_q: Optional[torch.Tensor] = None,
        seqused_k: Optional[torch.Tensor] = None,
        max_seqlen_q: Optional[int] = None,
        max_seqlen_k: Optional[int] = None,
        page_table: Optional[torch.Tensor] = None,
        softmax_scale: Optional[float] = None,
        causal: bool = False,
        window_size: Tuple[Optional[int], Optional[int]] = (None, None),
        learnable_sink: Optional[torch.Tensor] = None,
        softcap: float = 0.0,
        num_splits: int = 1,
        pack_gqa: Optional[bool] = None,
        deterministic: bool = False,
        score_mod: Optional[Callable] = None,
        aux_tensors: Optional[list] = None,
        return_lse: bool = False,
    ):
        window_left, window_right = window_size
        out, lse = _flash_attn_fwd(
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            max_seqlen_q=max_seqlen_q,
            max_seqlen_k=max_seqlen_k,
            page_table=page_table,
            softmax_scale=softmax_scale,
            causal=causal,
            window_size_left=window_left,
            window_size_right=window_right,
            learnable_sink=learnable_sink,
            softcap=softcap,
            num_splits=num_splits,
            pack_gqa=pack_gqa,
            score_mod=score_mod,
            aux_tensors=aux_tensors,
            return_lse=return_lse,
        )
        ctx.save_for_backward(
            q, k, v, out, lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k
        )
        # Non-tensor state needed by backward.
        ctx.softmax_scale = softmax_scale
        ctx.causal = causal
        ctx.window_size = window_size
        ctx.softcap = softcap
        ctx.deterministic = deterministic
        ctx.max_seqlen_q = max_seqlen_q
        ctx.max_seqlen_k = max_seqlen_k
        if lse is not None:
            # LSE gradient is not supported yet.
            ctx.mark_non_differentiable(lse)
        return out, lse

    @staticmethod
    def backward(ctx, dout, *args):
        (
            q,
            k,
            v,
            out,
            lse,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
        ) = ctx.saved_tensors
        # Softcap is not supported on the varlen backward path.
        assert ctx.softcap == 0.0
        window_left, window_right = ctx.window_size
        grads = _flash_attn_bwd(
            q,
            k,
            v,
            out,
            dout,
            lse,
            ctx.softmax_scale,
            ctx.causal,
            ctx.softcap,
            window_size_left=window_left,
            window_size_right=window_right,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
            seqused_q=seqused_q,
            seqused_k=seqused_k,
            max_seqlen_q=ctx.max_seqlen_q,
            max_seqlen_k=ctx.max_seqlen_k,
            deterministic=ctx.deterministic,
        )
        # One None per remaining forward argument (extra Nones are fine).
        return grads + (None,) * 20
def flash_attn_func(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    softmax_scale: Optional[float] = None,
    causal: bool = False,
    window_size: Tuple[Optional[int], Optional[int]] = (None, None),
    learnable_sink: Optional[torch.Tensor] = None,
    softcap: float = 0.0,
    num_splits: int = 1,
    pack_gqa: Optional[bool] = None,
    deterministic: bool = False,
    mask_mod: Optional[Callable] = None,
    full_block_cnt: Optional[torch.Tensor] = None,
    full_block_idx: Optional[torch.Tensor] = None,
    mask_block_cnt: Optional[torch.Tensor] = None,
    mask_block_idx: Optional[torch.Tensor] = None,
    block_size: Optional[Tuple[int, int]] = None,
    return_lse: bool = False,
):
    """Fixed-length flash-attention entry point.

    Thin wrapper that forwards every argument, in order, to
    ``FlashAttnFunc.apply`` (autograd.Function cannot take keyword args).
    Returns the pair ``(out, lse)`` produced by the Function.
    """
    forwarded = (
        q,
        k,
        v,
        softmax_scale,
        causal,
        window_size,
        learnable_sink,
        softcap,
        num_splits,
        pack_gqa,
        deterministic,
        mask_mod,
        full_block_cnt,
        full_block_idx,
        mask_block_cnt,
        mask_block_idx,
        block_size,
        return_lse,
    )
    return FlashAttnFunc.apply(*forwarded)
def flash_attn_varlen_func(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    cu_seqlens_q: Optional[torch.Tensor] = None,
    cu_seqlens_k: Optional[torch.Tensor] = None,
    max_seqlen_q: Optional[int] = None,
    max_seqlen_k: Optional[int] = None,
    seqused_q: Optional[torch.Tensor] = None,
    seqused_k: Optional[torch.Tensor] = None,
    page_table: Optional[torch.Tensor] = None,
    softmax_scale: Optional[float] = None,
    causal: bool = False,
    window_size: Tuple[Optional[int], Optional[int]] = (None, None),
    learnable_sink: Optional[torch.Tensor] = None,
    softcap: float = 0.0,
    num_splits: int = 1,
    pack_gqa: Optional[bool] = None,
    deterministic: bool = False,
    score_mod: Optional[Callable] = None,
    aux_tensors: Optional[list] = None,
    return_lse: bool = False,
):
    """Variable-length flash-attention entry point.

    Thin wrapper that forwards to ``FlashAttnVarlenFunc.apply``. Note the
    Function's positional order takes ``seqused_q``/``seqused_k`` *before*
    ``max_seqlen_q``/``max_seqlen_k``, so the tuple below intentionally
    differs from this signature's parameter order.
    """
    forwarded = (
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        seqused_q,
        seqused_k,
        max_seqlen_q,
        max_seqlen_k,
        page_table,
        softmax_scale,
        causal,
        window_size,
        learnable_sink,
        softcap,
        num_splits,
        pack_gqa,
        deterministic,
        score_mod,
        aux_tensors,
        return_lse,
    )
    return FlashAttnVarlenFunc.apply(*forwarded)
def _flash_attn_fwd_combine(
    out_partial: torch.Tensor,
    lse_partial: torch.Tensor,
    out: torch.Tensor,
    lse: Optional[torch.Tensor] = None,
    cu_seqlens: Optional[torch.Tensor] = None,
    seqused: Optional[torch.Tensor] = None,
    num_splits_dynamic_ptr: Optional[torch.Tensor] = None,
    semaphore_to_reset: Optional[torch.Tensor] = None,
) -> None:
    """Forward combine kernel for split attention computation.

    Combines partial outputs and log-sum-exp values from multiple splits
    of attention computation into final outputs, writing the results in
    place into ``out`` (and ``lse`` when provided). Compiled kernels are
    cached on ``_flash_attn_fwd_combine.compile_cache``, keyed by dtypes,
    head dim, tile sizes, and which optional tensors are present.

    Args:
        out_partial: Partial outputs tensor (num_splits, batch, seqlen, nheads, headdim) or
            (num_splits, total_q, nheads, headdim) if there's cu_seqlens
        lse_partial: Partial LSE tensor (num_splits, batch, seqlen, nheads) or
            (num_splits, total_q, nheads) if there's cu_seqlens
        out: Output tensor (batch, seqlen, nheads, headdim) or (total_q, nheads, headdim) if there's cu_seqlens
        lse: Output LSE tensor (batch, seqlen, nheads) or (total_q, nheads) if there's cu_seqlens.
        cu_seqlens: Cumulative sequence lengths for variable length sequences
        seqused: Used sequence lengths for each batch
        num_splits_dynamic_ptr: Dynamic number of splits per batch
        semaphore_to_reset: Semaphore for synchronization

    Returns:
        None
    """
    # Input validation
    assert out_partial.dim() in [4, 5], "out_partial must have 4 or 5 dimensions"
    assert lse_partial.dim() in [3, 4], "lse_partial must have 3 or 4 dimensions"
    assert out_partial.dtype in [torch.float16, torch.bfloat16, torch.float32], (
        "out_partial must be fp16, bf16, or fp32"
    )
    assert lse_partial.dtype == torch.float32, "lse_partial must be fp32"
    assert out_partial.is_cuda and lse_partial.is_cuda, "tensors must be on CUDA device"
    assert out_partial.stride(-1) == 1, "out_partial must be contiguous in the last dimension"
    assert lse_partial.stride(-2) == 1, "lse_partial must be contiguous in the seqlen dimension"
    assert lse_partial.shape == out_partial.shape[:-1]
    # Determine if this is variable length based on dimensions
    # (4-D out_partial means the batch dim was flattened into total_q).
    is_varlen = out_partial.dim() == 4
    # Validate output tensor shapes and types
    assert out.shape == out_partial.shape[1:], "out shape mismatch"
    if lse is not None:
        assert lse.shape == lse_partial.shape[1:], "lse shape mismatch"
        assert lse.dtype == torch.float32, "lse must be fp32"
    # Validate optional tensors
    for t, name in [
        (cu_seqlens, "cu_seqlens"),
        (seqused, "seqused"),
        (num_splits_dynamic_ptr, "num_splits_dynamic_ptr"),
    ]:
        if t is not None:
            assert t.dtype == torch.int32, f"{name} must be int32"
            assert t.is_cuda, f"{name} must be on CUDA device"
            assert t.is_contiguous(), f"{name} must be contiguous"
    head_dim = out_partial.shape[-1]
    num_splits = out_partial.shape[0]
    assert num_splits <= 256
    # If hdim is 96 or 192, it's faster to round them to 128 or 256 respectively
    # so that kBlockM is smaller and we have more parallelism.
    k_block_size = 64 if head_dim <= 64 else 128
    # We want kBlockM to be as small as possible to maximize parallelism.
    # E.g., if hdim is 64, we want kBlockM to be 16 so that we can use 256 threads, each reading 4 elements (floats).
    m_block_size = 8 if k_block_size % 128 == 0 else (16 if k_block_size % 64 == 0 else 32)
    log_max_splits = max(math.ceil(math.log2(num_splits)), 4)
    if m_block_size == 8:
        # If kBlockM == 8 then the minimum number of splits is 32.
        # TODO: we can deal w this by using 128 threads instead
        log_max_splits = max(log_max_splits, 5)
    current_stream = cuda.CUstream(torch.cuda.current_stream().cuda_stream)
    # Create combine kernel configuration
    dtype = torch2cute_dtype_map[out.dtype]
    dtype_partial = torch2cute_dtype_map[out_partial.dtype]
    # Cache key: every parameter that changes the generated kernel must be here,
    # including the presence/absence of the optional tensors.
    compile_key = (
        dtype,
        dtype_partial,
        head_dim,
        m_block_size,
        k_block_size,
        log_max_splits,
        cu_seqlens is not None,
        seqused is not None,
        lse is not None,
    )
    if compile_key not in _flash_attn_fwd_combine.compile_cache:
        out_partial_tensor = to_cute_tensor(
            out_partial, leading_dim=4 if not is_varlen else 3
        )
        lse_partial_tensor = to_cute_tensor(
            lse_partial, assumed_align=4, leading_dim=lse_partial.ndim - 2
        )
        out_tensor = to_cute_tensor(out, leading_dim=3 if not is_varlen else 2)
        lse_tensor = (
            to_cute_tensor(lse, assumed_align=4, leading_dim=lse.ndim - 2)
            if lse is not None
            else None
        )
        optional_tensors = [
            to_cute_tensor(t, assumed_align=4, leading_dim=0)
            if t is not None
            else None
            for t in (cu_seqlens, seqused, num_splits_dynamic_ptr, semaphore_to_reset)
        ]
        cu_seqlens_tensor, seqused_tensor, num_splits_dynamic_tensor, semaphore_tensor = (
            optional_tensors
        )
        fa_combine = FlashAttentionForwardCombine(
            dtype=dtype,
            dtype_partial=dtype_partial,
            head_dim=head_dim,
            m_block_size=m_block_size,
            k_block_size=k_block_size,
            log_max_splits=log_max_splits,
        )
        # Check if implementation is supported
        if not fa_combine.can_implement(
            dtype,
            dtype_partial,
            head_dim,
            m_block_size,
            k_block_size,
            log_max_splits,
            num_threads=256,
        ):
            raise RuntimeError(
                "FlashAttention combine kernel cannot be implemented with given parameters"
            )
        _flash_attn_fwd_combine.compile_cache[compile_key] = cute.compile(
            fa_combine,
            out_partial_tensor,
            lse_partial_tensor,
            out_tensor,
            lse_tensor,
            cu_seqlens_tensor,
            seqused_tensor,
            num_splits_dynamic_tensor,
            semaphore_tensor,
            current_stream,
            options="--enable-tvm-ffi",
        )
    # Skip execution under fake-tensor tracing; only the compile side effects matter.
    if not is_fake_mode():
        _flash_attn_fwd_combine.compile_cache[compile_key](
            out_partial,
            lse_partial,
            out,
            lse,
            cu_seqlens,
            seqused,
            num_splits_dynamic_ptr,
            semaphore_to_reset,
            current_stream,
        )
# JIT compile cache for the forward-combine kernel (entries keyed by compile_key).
_flash_attn_fwd_combine.compile_cache = get_jit_cache("fwd_combine")
def flash_attn_combine(
    out_partial: torch.Tensor,
    lse_partial: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    out_dtype: Optional[torch.dtype] = None,
    cu_seqlens: Optional[torch.Tensor] = None,
    seqused: Optional[torch.Tensor] = None,
    return_lse: bool = True,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Flash Attention combine function for split attention computation.

    Combines partial outputs and log-sum-exp values from multiple splits
    of attention computation into final outputs. This is the main user-facing
    interface for the combine kernel.

    Args:
        out_partial: Partial outputs tensor with shape:
            - (num_splits, batch_size, seqlen, num_heads, head_size) for regular batched input
            - (num_splits, total_q, num_heads, head_size) for variable length input
        lse_partial: Partial LSE tensor with shape:
            - (num_splits, batch_size, seqlen, num_heads) for regular batched input
            - (num_splits, total_q, num_heads) for variable length input
        out: Optional output tensor. If None, will be created automatically.
        out_dtype: Optional output dtype. If None, defaults to ``out_partial.dtype``
            (which must be fp32 — see the assertion below).
        cu_seqlens: Cumulative sequence lengths for variable length sequences
        seqused: Used sequence lengths for each batch
        return_lse: Whether to return the combined LSE tensor. Default is True.

    Returns:
        Tuple of (out, lse) where:
            - out: Combined output tensor with shape (batch_size, seqlen, num_heads, head_size)
              or (total_q, num_heads, head_size) for varlen
            - lse: Combined log-sum-exp tensor with shape (batch_size, seqlen, num_heads)
              or (total_q, num_heads) for varlen. None if return_lse=False

    Note:
        This function expects the input tensors to be in the format produced by
        split attention computation, where the first dimension is num_splits.
        The permuting from user format to kernel format is now done inside the kernel.
    """
    # Input validation
    assert out_partial.dim() in [4, 5], "out_partial must have 4 or 5 dimensions"
    assert lse_partial.dim() in [3, 4], "lse_partial must have 3 or 4 dimensions"
    assert out_partial.dtype == torch.float32, "out_partial must be fp32 (from accumulation)"
    assert lse_partial.dtype == torch.float32, "lse_partial must be fp32"
    # Determine if this is variable length based on dimensions
    is_varlen = out_partial.dim() == 4
    if is_varlen:
        # Variable length: (num_splits, total_q, num_heads, head_size)
        num_splits, total_q, num_heads, head_size = out_partial.shape
        assert lse_partial.shape == (num_splits, total_q, num_heads), (
            "lse_partial shape mismatch for varlen"
        )
        batch_size = 1  # Treat as single batch for varlen
        seqlen = total_q
    else:
        # Regular batched: (num_splits, batch_size, seqlen, num_heads, head_size)
        num_splits, batch_size, seqlen, num_heads, head_size = out_partial.shape
        assert lse_partial.shape == (num_splits, batch_size, seqlen, num_heads), (
            "lse_partial shape mismatch"
        )
    # Determine output dtype (defaults to the fp32 accumulator dtype; pass
    # out_dtype=torch.float16/bfloat16 explicitly to downcast).
    if out_dtype is None:
        out_dtype = out_partial.dtype
    # Create output if not provided
    device = out_partial.device
    if out is None:
        if is_varlen:
            out = torch.empty(total_q, num_heads, head_size, dtype=out_dtype, device=device)
        else:
            out = torch.empty(
                batch_size, seqlen, num_heads, head_size, dtype=out_dtype, device=device
            )
    # Create lse output only if requested. It is allocated (heads, seqlen) and
    # transposed so the seqlen dimension is contiguous, which the combine
    # kernel requires (it asserts stride(-2) == 1 on LSE tensors).
    if return_lse:
        if is_varlen:
            lse = torch.empty(num_heads, total_q, dtype=torch.float32, device=device).transpose(
                0, 1
            )
        else:
            lse = torch.empty(
                batch_size, num_heads, seqlen, dtype=torch.float32, device=device
            ).transpose(1, 2)
    else:
        lse = None
    _flash_attn_fwd_combine(
        out_partial,
        lse_partial,
        out,
        lse,
        cu_seqlens,
        seqused,
    )
    return out, lse
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/interface.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1722,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/mask.py | # Copyright (c) 2025, Tri Dao.
from typing import Optional, Callable
from dataclasses import dataclass
import cutlass
import cutlass.cute as cute
from cutlass import Float32, Int32, const_expr
from quack import layout_utils
import flash_attn.cute.utils as utils
from flash_attn.cute.seqlen_info import SeqlenInfoQK
@cute.jit
def mask_r2p(X: cute.Tensor, col_limit: Int32, arch: int = 90, rank1: bool = False) -> None:
    """Set entries of ``X`` whose column index is at/right of ``col_limit`` to -inf.

    Uses a bitmask comparison per group of 24 columns that the compiler lowers
    to the R2P instruction, instead of one integer compare per element.

    Args:
        X: register fragment masked in place; the last mode is the column mode
            (the only mode when ``rank1`` is True).
        col_limit: exclusive right limit — columns >= this are masked.
        arch: 90 or 100; selects how fragment indices map to column indices.
        rank1: treat ``X`` as rank-1 (columns only) instead of (rows, columns).
    """
    # Bit manipulation, compiles down to the R2P instruction
    # For sm100: we know that tScS_t2r[i][1] == i, for the particular tmem copy atom we're using.
    # For sm90: instead of comparing limit to 0, 1, 8, 9, 16, 17, ...,
    # we compare a transformed version of limit to 0, 1, 2, 3, 4, 5, ...
    if const_expr(arch == 90):
        col_limit_transformed = col_limit // 8 * 2 + min(col_limit % 8, 2)
    else:
        col_limit_transformed = col_limit
    ncol = const_expr(cute.size(X.shape[cute.rank(X) - 1]) if not rank1 else cute.size(X.shape))
    # Ideally we'd move by 32 instead of 24, but mask >> i isn't correct for i == 31
    for s in cutlass.range_constexpr(cute.ceil_div(ncol, 24)):
        # Don't need to clamp to 32 since the shr.u32 instruction does that already
        col_limit_right_s = max(col_limit_transformed - s * 24, 0)
        # 0 -> 0b00...00, 1 -> 0b00...01, ..., 31 -> 0b01...11, 32 -> 0b11...11
        mask = (1 << col_limit_right_s) - 1
        # This needs to be range_constexpr, o/w the compiler can't generate the R2P instruction
        for i in cutlass.range_constexpr(min(24, ncol - s * 24)):
            in_bound = cutlass.Boolean(mask & (1 << i))
            c = s * 24 + i
            if const_expr(rank1):
                X[c] = X[c] if in_bound else -Float32.inf
                # This is the equivalent of:
                # X[s * 24 + i] = X[s * 24 + i] if i < col_limit_right_s else -Float32.inf
            else:
                for r in cutlass.range_constexpr(cute.size(X.shape[0])):
                    X[r, c] = X[r, c] if in_bound else -Float32.inf
@cute.jit
def mask_r2p_transposed(X: cute.Tensor, row_limit_top: Int32, num_rep: int) -> None:
    """Set entries of ``X`` whose (transformed) row index is below ``row_limit_top`` to -inf.

    Transposed counterpart of ``mask_r2p``: here the mask bit being set means
    the element is *out* of bounds (masked), so elements with transformed index
    < ``row_limit_top`` are overwritten with -inf. Hardcoded for 2 warp groups.

    Args:
        X: rank-1 register fragment masked in place.
        row_limit_top: rows above this limit (smaller index) are masked.
        num_rep: repetition factor of the tmem row layout used to transform
            ``row_limit_top`` to the fragment's index space.
    """
    # Bit manipulation, compiles down to the R2P instruction
    # For sm100: we know that tScS_t2r[i][0] has the form 0, 1, ..., 31, 64, ..., 127
    # or 0, 1, ..., 15, 32, ..., 47, 64, ...
    # We compare a transformed version of limit to 0, 1, 2, 3, 4, 5, ...
    # Here we hardcode for the case of 2 warp groups.
    num_wg = 2
    row_limit_top_transformed = row_limit_top // (num_rep * num_wg) * num_rep + min(
        row_limit_top % (num_rep * num_wg), num_rep
    )
    ncol = cute.size(X.shape)
    # Ideally we'd move by 32 instead of 24, but mask >> i isn't correct for i == 31
    for s in cutlass.range_constexpr(cute.ceil_div(ncol, 24)):
        row_limit_top_s = max(row_limit_top_transformed - s * 24, 0)
        # 0 -> 0b00...00, 1 -> 0b00...01, ..., 31 -> 0b01...11, 32 -> 0b11...11
        mask = (1 << row_limit_top_s) - 1
        # This needs to be range_constexpr, o/w the compiler can't generate the R2P instruction
        for i in cutlass.range_constexpr(min(24, ncol - s * 24)):
            out_bound = cutlass.Boolean(mask & (1 << i))
            c = s * 24 + i
            X[c] = -Float32.inf if out_bound else X[c]
            # tidx = cute.arch.thread_idx()[0] % 256
            # if tidx == 128:
            #     cute.printf("tidx = {}, s = {}, i = {}, row_limit_top = {}, row_limit_top_s = {}, mask = {}, out_bound = {}", tidx, s, i, row_limit_top, row_limit_top_s, mask, out_bound)
@cute.jit
def mask_r2p_dual_bound(
    X: cute.Tensor,
    col_limit_left: Int32,  # Inclusive lower bound
    col_limit_right: Int32,  # Exclusive upper bound
) -> None:
    """
    Dual-bound masking using two bitmasks for SM100, following mask_r2p.
    Masks elements where: NOT (col_limit_left <= col < col_limit_right)

    Uses bit manipulation to create a range mask:
        mask_right = (1 << right) - 1         -> bits (right-1)..0 are 1
        mask_left  = (1 << left) - 1          -> bits (left-1)..0 are 1
        mask_range = mask_right & ~mask_left  -> bits (right-1)..left are 1
    """
    ncol = const_expr(cute.size(X.shape))
    for s in cutlass.range_constexpr(cute.ceil_div(ncol, 24)):
        right_s = max(col_limit_right - s * 24, 0)
        left_s = max(col_limit_left - s * 24, 0)
        # otherwise cute dsl complains about python int too large to convert into c long
        right_s = min(right_s, 24)
        left_s = min(left_s, 24)
        # bits (right-1)..left are 1
        mask_right = (1 << right_s) - 1
        mask_left = (1 << left_s) - 1
        mask_range = mask_right & ~mask_left
        # This needs to be range_constexpr, o/w the compiler can't generate the R2P instruction
        for i in cutlass.range_constexpr(min(24, ncol - s * 24)):
            in_bound = cutlass.Boolean(mask_range & (1 << i))
            c = s * 24 + i
            X[c] = X[c] if in_bound else -Float32.inf
@dataclass(frozen=True)
class AttentionMask:
tile_m: cutlass.Constexpr[int]
tile_n: cutlass.Constexpr[int]
seqlen_info: SeqlenInfoQK
window_size_left: Optional[Int32] = None
window_size_right: Optional[Int32] = None
qhead_per_kvhead_packgqa: cutlass.Constexpr[int] = 1 # only pass in if we're doing PackGQA
swap_AB: cutlass.Constexpr[bool] = False
@property
def seqlen_q(self) -> Int32:
    """Query sequence length, taken from the wrapped SeqlenInfoQK."""
    return self.seqlen_info.seqlen_q
@property
def seqlen_k(self) -> Int32:
    """Key/value sequence length, taken from the wrapped SeqlenInfoQK."""
    return self.seqlen_info.seqlen_k
@cute.jit
def apply_mask(
    self,
    acc_S: cute.Tensor,
    batch_idx: cutlass.Int32,
    head_idx: cutlass.Int32,
    m_block: cutlass.Int32,
    n_block: cutlass.Int32,
    thr_mma: cute.TiledMma,
    mask_seqlen: cutlass.Constexpr[bool],
    mask_causal: cutlass.Constexpr[bool],
    mask_local: cutlass.Constexpr[bool] = False,
    mask_mod: cutlass.Constexpr[Optional[Callable]] = None,
    aux_tensors: Optional[list] = None,
    fastdiv_mods=(None, None),
) -> None:
    """Mask an attention-score accumulator tile in place (sm90 path).

    Elements of ``acc_S`` outside the valid attention region are overwritten
    with -inf so they vanish after softmax. Exactly one masking mode runs,
    selected at compile time:
      * seqlen-only (no causal/local/mask_mod): mask columns >= seqlen_k;
      * ``mask_mod`` (FlexAttention-style): evaluate the user predicate per
        element, with optional PackGQA head unpacking and fastdiv index wrap;
      * causal or local (sliding-window): per-row column limits, with a
        transposed code path when ``swap_AB`` is set.

    Args:
        acc_S: accumulator fragment for this tile, masked in place.
        batch_idx / head_idx: coordinates forwarded to ``mask_mod``.
        m_block / n_block: tile indices along seqlen_q / seqlen_k.
        thr_mma: per-thread MMA used to recover logical (row, col) coords.
        mask_seqlen / mask_causal / mask_local / mask_mod: compile-time mode
            selectors; ``mask_causal`` and ``mask_local`` are exclusive.
        aux_tensors: extra tensors forwarded to ``mask_mod``.
        fastdiv_mods: optional (q, k) fast-division helpers used to wrap
            indices passed to ``mask_mod``.
    """
    assert not (mask_causal and mask_local), "mask_causal and mask_local cannot be both True"
    acc_S_mn = layout_utils.reshape_acc_to_mn(acc_S, transpose=self.swap_AB)
    acc_shape = (self.tile_m, self.tile_n)
    cS = cute.make_identity_tensor(acc_shape if not self.swap_AB else acc_shape[::-1])
    tScS_mn = layout_utils.reshape_acc_to_mn(thr_mma.partition_C(cS), transpose=self.swap_AB)
    # We use t0ScS as these indices are known at compile time. We then must subtract the
    # column limit by the thread column offset.
    t0ScS_mn = layout_utils.reshape_acc_to_mn(
        thr_mma.get_slice(0).partition_C(cS), transpose=self.swap_AB
    )
    # ROW indexes the Q (m) coordinate, COL the KV (n) coordinate, accounting for swap_AB.
    ROW = 0 if const_expr(not self.swap_AB) else 1
    COL = 1 if const_expr(not self.swap_AB) else 0
    thr_col_offset = tScS_mn[0][COL]
    # To handle edge cases of completely masked out rows where n_block_max = 0,
    # we treat negative n_blocks as 0th n_block
    # TODO: find more transparent solution
    if n_block < 0:
        n_block = 0
    seqlenk_col_limit = self.seqlen_k - n_block * self.tile_n - thr_col_offset
    if const_expr(not mask_causal and not mask_local and mask_mod is None):
        # Seqlen-only masking: every column at/right of seqlen_k goes to -inf.
        if const_expr(mask_seqlen):
            # The compiler now choses not to use R2P
            r2p = const_expr(False and not self.swap_AB)
            if const_expr(not r2p):
                # traverse column index.
                for c in cutlass.range(cute.size(tScS_mn.shape[1]), unroll_full=True):
                    oob = t0ScS_mn[0, c][COL] >= seqlenk_col_limit
                    for r in cutlass.range(cute.size(tScS_mn.shape[0]), unroll_full=True):
                        acc_S_mn[r, c] = -Float32.inf if oob else acc_S_mn[r, c]
            else:
                mask_r2p(acc_S_mn, seqlenk_col_limit, arch=90)
    elif const_expr(
        not mask_causal and not mask_local and mask_mod is not None
    ):  # FlexAttention mask mod
        nrow = const_expr(cute.size(tScS_mn.shape[0]))
        ncol = const_expr(cute.size(tScS_mn.shape[1]))
        has_fastdiv = const_expr(
            fastdiv_mods is not None
            and fastdiv_mods[0] is not None
            and fastdiv_mods[1] is not None
        )
        wrap_aux_indices = const_expr(
            has_fastdiv and mask_seqlen and const_expr(aux_tensors is not None)
        )
        for r in cutlass.range_constexpr(nrow):
            # Respect swap_AB: ROW/COL determine which coordinate component corresponds to Q/KV.
            local_row = tScS_mn[r, 0][ROW]
            global_row_idx = local_row + m_block * self.tile_m
            row_for_mod = global_row_idx
            head_idx_for_mod = head_idx
            if const_expr(self.qhead_per_kvhead_packgqa != 1):
                # PackGQA: rows interleave query heads; recover (head, row).
                head_offset = global_row_idx % self.qhead_per_kvhead_packgqa
                head_idx_for_mod = head_idx * self.qhead_per_kvhead_packgqa + head_offset
                row_for_mod = global_row_idx // self.qhead_per_kvhead_packgqa
            row_for_seqlen = row_for_mod
            if const_expr(wrap_aux_indices):
                _, row_for_mod = divmod(row_for_mod, fastdiv_mods[0])
            for col in cutlass.range_constexpr(ncol):
                col_idx_local = t0ScS_mn[0, col][COL]
                # Convert to absolute column index
                global_col_idx = thr_col_offset + col_idx_local + n_block * self.tile_n
                col_for_mod = global_col_idx
                if const_expr(wrap_aux_indices):
                    _, col_for_mod = divmod(global_col_idx, fastdiv_mods[1])
                batch_idx_ssa = utils.scalar_to_ssa(batch_idx, cutlass.Int32)
                head_idx_ssa = utils.scalar_to_ssa(head_idx_for_mod, cutlass.Int32)
                q_idx_ssa = utils.scalar_to_ssa(row_for_mod, cutlass.Int32)
                kv_idx_ssa = utils.scalar_to_ssa(col_for_mod, cutlass.Int32)
                mask_value = mask_mod(
                    batch_idx_ssa,
                    head_idx_ssa,
                    q_idx_ssa,
                    kv_idx_ssa,
                    self.seqlen_info,
                    aux_tensors,
                )
                cond = cutlass.Boolean(utils.ssa_to_scalar(mask_value))
                if const_expr(mask_seqlen):
                    # Out-of-range elements are masked regardless of mask_mod's verdict.
                    out_of_bounds = (row_for_seqlen >= self.seqlen_q) or (
                        global_col_idx >= self.seqlen_k
                    )
                    if out_of_bounds:
                        acc_S_mn[r, col] = -cutlass.Float32.inf
                    else:
                        acc_S_mn[r, col] = acc_S_mn[r, col] if cond else -cutlass.Float32.inf
                else:
                    acc_S_mn[r, col] = acc_S_mn[r, col] if cond else -cutlass.Float32.inf
    else:  # Causal or local
        if const_expr(not self.swap_AB):
            # If PackGQA, we split the work of compute divmod among threads in the same row
            threads_per_row = thr_mma.tv_layout_C.shape[0][0]
            mma_m_idx = None
            if const_expr(self.qhead_per_kvhead_packgqa != 1):
                assert not self.swap_AB, "swap_AB with PackGQA not supported yet"
                assert cute.arch.WARP_SIZE % threads_per_row == 0, (
                    "threads_per_row must divide WARP_SIZE"
                )
                assert cute.size(acc_S_mn.shape[0]) <= threads_per_row
                tidx = thr_mma.thr_idx
                # Each thread computes the divmod for one row; results are
                # exchanged below via shuffle_sync.
                mma_m_idx = (
                    m_block * self.tile_m + tScS_mn[tidx % threads_per_row, 0][0]
                ) // self.qhead_per_kvhead_packgqa
            causal_row_offset = (
                1 + self.seqlen_k - n_block * self.tile_n - self.seqlen_q - thr_col_offset
            )
            if const_expr(mask_causal):
                r2p = const_expr(not self.swap_AB)  # R2P trick, see apply_mask_sm100
                for r in cutlass.range(cute.size(tScS_mn.shape[0]), unroll_full=True):
                    # get the column index limit based on current row. Only consider the row index, so the column index sets to 0.
                    if const_expr(self.qhead_per_kvhead_packgqa == 1):
                        row_idx = tScS_mn[r, 0][0] + m_block * self.tile_m
                    else:
                        row_idx = utils.shuffle_sync(
                            mma_m_idx, r % threads_per_row, width=threads_per_row
                        )
                    col_limit_right = row_idx + causal_row_offset
                    if const_expr(mask_seqlen):
                        col_limit_right = cutlass.min(col_limit_right, seqlenk_col_limit)
                    if const_expr(not r2p):
                        # traverse column index.
                        for c in cutlass.range(cute.size(tScS_mn.shape[1]), unroll_full=True):
                            acc_S_mn[r, c] = (
                                -Float32.inf
                                if t0ScS_mn[0, c][1] >= col_limit_right
                                else acc_S_mn[r, c]
                            )
                    else:
                        mask_r2p(acc_S_mn[r, None], col_limit_right, arch=90, rank1=True)
            else:  # Local
                local_row_offset_right = (
                    causal_row_offset + self.window_size_right
                    if const_expr(self.window_size_right is not None)
                    else None
                )
                local_row_offset_left = (
                    causal_row_offset - 1 - self.window_size_left
                    if const_expr(self.window_size_left is not None)
                    else None
                )
                for r in cutlass.range(cute.size(tScS_mn.shape[0]), unroll_full=True):
                    if const_expr(self.qhead_per_kvhead_packgqa == 1):
                        row_idx = tScS_mn[r, 0][0] + m_block * self.tile_m
                    else:
                        row_idx = utils.shuffle_sync(
                            mma_m_idx, r % threads_per_row, width=threads_per_row
                        )
                    if const_expr(self.window_size_right is not None):
                        col_limit_right = row_idx + local_row_offset_right
                    else:
                        col_limit_right = self.tile_n
                    if const_expr(mask_seqlen):
                        col_limit_right = cutlass.min(col_limit_right, seqlenk_col_limit)
                    col_limit_left = (
                        row_idx + local_row_offset_left
                        if const_expr(self.window_size_left is not None)
                        else 0
                    )
                    # if cute.arch.thread_idx()[0] == 128: cute.printf("n_block = {}, r = {}, row_idx = {}, causal_row_offset = {}, col_limit_right = {}, col_limit_left = {}", n_block, r, row_idx, causal_row_offset, col_limit_right, col_limit_left)
                    # traverse column index.
                    for c in cutlass.range(cute.size(tScS_mn.shape[1]), unroll_full=True):
                        col_idx = t0ScS_mn[0, c][1]
                        # only consider the column index, so the row index sets to 0.
                        if col_idx >= col_limit_right or col_idx < col_limit_left:
                            acc_S_mn[r, c] = -Float32.inf
        else:  # swap_AB
            assert self.qhead_per_kvhead_packgqa == 1
            thr_row_offset = tScS_mn[0][ROW]
            causal_row_offset = (
                seqlenk_col_limit - self.seqlen_q + m_block * self.tile_m + thr_row_offset
            )
            if const_expr(mask_causal):
                for c in cutlass.range(cute.size(tScS_mn.shape[1]), unroll_full=True):
                    col0 = t0ScS_mn[0, c][COL]
                    # If col0 is beyond the column limit, we want to mask out the entire
                    # column, by setting row limit to be self.tile_m.
                    row_limit_top = (
                        self.tile_m
                        if col0 >= seqlenk_col_limit and mask_seqlen
                        else col0 - causal_row_offset
                    )
                    for r in cutlass.range(cute.size(tScS_mn.shape[0]), unroll_full=True):
                        acc_S_mn[r, c] = (
                            -Float32.inf
                            if t0ScS_mn[r, 0][ROW] < row_limit_top
                            else acc_S_mn[r, c]
                        )
            else:
                for c in cutlass.range(cute.size(tScS_mn.shape[1]), unroll_full=True):
                    col0 = t0ScS_mn[0, c][COL]
                    # If col0 is beyond the column limit, we want to mask out the entire
                    # column, by setting row limit to be self.tile_m.
                    row_limit_top = (
                        self.tile_m
                        if col0 >= seqlenk_col_limit
                        else col0 - causal_row_offset - self.window_size_right
                    )
                    # TODO: do we need col_limit_sink?
                    row_limit_bot = col0 - causal_row_offset + self.window_size_left
                    for r in cutlass.range(cute.size(tScS_mn.shape[0]), unroll_full=True):
                        row_idx = t0ScS_mn[r, 0][ROW]
                        acc_S_mn[r, c] = (
                            -Float32.inf
                            if row_idx < row_limit_top or row_idx > row_limit_bot
                            else acc_S_mn[r, c]
                        )
@cute.jit
def apply_mask_sm100(
    self,
    acc_S: cute.Tensor,
    m_block: Int32,
    n_block: Int32,
    thr_mma: cute.TiledMma,
    thr_tmem_load: cute.TiledCopy,
    mask_seqlen: cutlass.Constexpr[bool],
    mask_causal: cutlass.Constexpr[bool],
    mask_local: cutlass.Constexpr[bool] = False,
    mask_mod: cutlass.Constexpr[Optional[Callable]] = None,
    batch_idx: Int32 = None,
    head_idx: Int32 = None,
    aux_tensors: Optional[list] = None,
    fastdiv_mods=(None, None),
    head_divmod=None,
    check_q_boundary: bool = False,
) -> None:
    """Mask an attention-score accumulator tile in place (sm100/tmem path).

    Same masking modes as ``apply_mask`` (seqlen-only, FlexAttention
    ``mask_mod``, causal, or local), but operating on the rank-1 fragment
    produced by the tmem load, whose index-to-coordinate mapping comes from
    ``thr_tmem_load.partition_D``. The R2P bitmask helpers (``mask_r2p``,
    ``mask_r2p_dual_bound``) are used on the causal/local paths.

    Args:
        acc_S: rank-1 accumulator fragment, masked in place.
        m_block / n_block: tile indices along seqlen_q / seqlen_k.
        thr_mma / thr_tmem_load: used to recover logical (row, col) coords.
        mask_seqlen / mask_causal / mask_local / mask_mod: compile-time mode
            selectors; ``mask_causal`` and ``mask_local`` are exclusive.
        batch_idx / head_idx: coordinates forwarded to ``mask_mod``.
        aux_tensors: extra tensors forwarded to ``mask_mod``.
        fastdiv_mods: optional (q, k) fast-division helpers to wrap indices
            passed to ``mask_mod``.
        head_divmod: fast divmod for PackGQA head unpacking (required when
            ``qhead_per_kvhead_packgqa != 1`` on the mask_mod path).
        check_q_boundary: also mask rows with q index >= seqlen_q
            (mask_mod path only).
    """
    assert not (mask_causal and mask_local), "mask_causal and mask_local cannot be both True"
    acc_shape = (self.tile_m, self.tile_n)
    cS = cute.make_identity_tensor(acc_shape if not self.swap_AB else acc_shape[::-1])
    tScS = thr_mma.partition_C(cS)
    tScS = tScS[(None, None), 0, 0]
    tScS_t2r = thr_tmem_load.partition_D(tScS)
    # To handle edge cases of completely masked out rows where n_block_max = 0,
    # we treat negative n_blocks as 0th n_block
    # TODO: find more transparent solution
    if n_block < 0:
        n_block = 0
    seqlenk_col_limit = self.seqlen_k - n_block * self.tile_n
    r2p = True
    if const_expr(not mask_causal and not mask_local and mask_mod is None):
        # Seqlen-only masking: every column at/right of seqlen_k goes to -inf.
        if const_expr(mask_seqlen):
            if const_expr(not r2p):
                for i in cutlass.range(cute.size(tScS_t2r.shape), unroll_full=True):
                    # if tScS_t2r[i][1] >= seqlenk_col_limit:
                    #     acc_S[i] = -Float32.inf
                    # For some reason the 2 lines above generate really bad SASS
                    acc_S[i] = -Float32.inf if tScS_t2r[i][1] >= seqlenk_col_limit else acc_S[i]
            else:
                mask_r2p(acc_S, seqlenk_col_limit, arch=100, rank1=True)
    elif const_expr(not mask_causal and not mask_local and mask_mod is not None):
        # Block sparse case w/ mask_mod
        has_fastdiv = const_expr(
            fastdiv_mods is not None
            and fastdiv_mods[0] is not None
            and fastdiv_mods[1] is not None
        )
        batch_idx_ssa = utils.scalar_to_ssa(batch_idx, cutlass.Int32)
        ncol = const_expr(cute.size(tScS_t2r.shape))
        for i in cutlass.range_constexpr(ncol):
            # Recover logical (q, kv) coordinates for this fragment element,
            # accounting for swap_AB.
            row_coord = tScS_t2r[i][0] if not self.swap_AB else tScS_t2r[i][1]
            col_coord = tScS_t2r[i][1] if not self.swap_AB else tScS_t2r[i][0]
            global_row = row_coord + m_block * self.tile_m
            global_col = col_coord + n_block * self.tile_n
            if const_expr(self.qhead_per_kvhead_packgqa != 1):
                # PackGQA: rows interleave query heads; recover (head, row).
                assert head_divmod is not None
                mask_row, head_offset = divmod(global_row, head_divmod)
                head_idx_for_mod = head_idx * self.qhead_per_kvhead_packgqa + head_offset
            else:
                head_idx_for_mod = head_idx
                mask_row = global_row
            mask_row_for_mod = mask_row
            if const_expr(has_fastdiv and aux_tensors is not None):
                if check_q_boundary:
                    _, mask_row_for_mod = divmod(mask_row, fastdiv_mods[0])
            global_col_for_mod = global_col
            if const_expr(has_fastdiv and mask_seqlen and aux_tensors is not None):
                _, global_col_for_mod = divmod(global_col, fastdiv_mods[1])
            head_idx_ssa = utils.scalar_to_ssa(head_idx_for_mod, cutlass.Int32)
            mask_row_ssa = utils.scalar_to_ssa(mask_row_for_mod, cutlass.Int32)
            kv_idx_ssa = utils.scalar_to_ssa(global_col_for_mod, cutlass.Int32)
            mask_value = mask_mod(
                batch_idx_ssa,
                head_idx_ssa,
                mask_row_ssa,
                kv_idx_ssa,
                self.seqlen_info,
                aux_tensors,
            )
            cond = cutlass.Boolean(utils.ssa_to_scalar(mask_value))
            acc_S[i] = acc_S[i] if cond else -Float32.inf
            # Seqlen / q-boundary checks override mask_mod's verdict.
            if const_expr(mask_seqlen):
                acc_S[i] = -Float32.inf if global_col >= self.seqlen_k else acc_S[i]
            if check_q_boundary:
                acc_S[i] = -Float32.inf if mask_row >= self.seqlen_q else acc_S[i]
    else:  # Causal or local
        causal_row_offset = 1 + self.seqlen_k - n_block * self.tile_n - self.seqlen_q
        row_idx = tScS_t2r[0][0] + m_block * self.tile_m
        if const_expr(self.qhead_per_kvhead_packgqa != 1):
            row_idx = row_idx // self.qhead_per_kvhead_packgqa
        if const_expr(mask_causal):
            col_limit_right = row_idx + causal_row_offset
            if const_expr(mask_seqlen):
                col_limit_right = cutlass.min(col_limit_right, seqlenk_col_limit)
            # if cute.arch.thread_idx()[0] % 32 == 0:
            #     cute.printf("tidx = %d, tidx tmem = %d, row_idx = %d, col_limit_right = %d, causal_row_offset = %d\n", cute.arch.thread_idx()[0], thr_tmem_load.thr_idx, row_idx, col_limit_right, causal_row_offset)
            ncol = const_expr(cute.size(tScS_t2r.shape))
            if const_expr(not r2p):
                for i in cutlass.range(ncol, unroll_full=True):
                    acc_S[i] = -Float32.inf if tScS_t2r[i][1] >= col_limit_right else acc_S[i]
            else:
                mask_r2p(acc_S, col_limit_right, arch=100, rank1=True)
        else:
            local_row_offset_right = (
                causal_row_offset + self.window_size_right
                if const_expr(self.window_size_right is not None)
                else None
            )
            local_row_offset_left = (
                causal_row_offset - 1 - self.window_size_left
                if const_expr(self.window_size_left is not None)
                else None
            )
            if const_expr(self.window_size_right is not None):
                col_limit_right = row_idx + local_row_offset_right
            else:
                col_limit_right = self.tile_n
            if const_expr(mask_seqlen):
                col_limit_right = cutlass.min(col_limit_right, seqlenk_col_limit)
            col_limit_left = (
                row_idx + local_row_offset_left
                if const_expr(self.window_size_left is not None)
                else 0
            )
            if const_expr(not r2p):
                # if cute.arch.thread_idx()[0] == 0 or cute.arch.thread_idx()[0] == 128: cute.printf("m_block = {}, n_block = {}, row_idx = {}, causal_row_offset = {}, col_limit_right = {}, col_limit_left = {}", m_block, n_block, row_idx, causal_row_offset, col_limit_right, col_limit_left)
                for i in cutlass.range(cute.size(tScS_t2r.shape), unroll_full=True):
                    col_idx = tScS_t2r[i][1]
                    acc_S[i] = (
                        -Float32.inf
                        if col_idx >= col_limit_right or col_idx < col_limit_left
                        else acc_S[i]
                    )
            else:
                # XOR-based R2P dual bound masking
                mask_r2p_dual_bound(acc_S, col_limit_left, col_limit_right)
@cute.jit
def apply_mask_sm100_transposed(
    self,
    acc_S: cute.Tensor,
    tScS_t2r: cute.Tensor,
    t0ScS_t2r: cute.Tensor,
    m_block: cutlass.Int32,
    n_block: cutlass.Int32,
    mask_seqlen: cutlass.Constexpr,
    mask_causal: cutlass.Constexpr,
    mask_local: cutlass.Constexpr,
    mask_mod: cutlass.Constexpr[Optional[Callable]] = None,
    batch_idx: Int32 = None,
    head_idx: Int32 = None,
    aux_tensors: Optional[list] = None,
    fastdiv_mods=(None, None),
    is_full_block: bool = False,
    check_m_boundary: bool = True,
) -> None:
    """
    Backward pass: mask S = K @ Q.T where n_block tiles seqlen_k and m_block tiles seqlen_q.
    Coordinate convention:
    - ROW corresponds to Q (m_block)
    - COL corresponds to KV (n_block)
    is_full_block: If True, skip mask_mod (all elements valid). Only apply seqlen masking.
    check_m_boundary: If False, skip seqlen_q boundary check (optimization for non-boundary m_blocks).
    When iterating m_blocks in forward order, only the last m_block may be partial.
    """
    assert not (mask_causal and mask_local), "mask_causal and mask_local cannot be both True"
    # swap_AB selects which accumulator coordinate indexes Q rows vs KV columns.
    ROW = 0 if const_expr(not self.swap_AB) else 1
    COL = 1 if const_expr(not self.swap_AB) else 0
    # assert t0ScS_t2r[0][COL] == 0, "col0 == 0" # tmp comment for 2-cta bwd
    thr_col_offset = tScS_t2r[0][COL]
    # Number of in-bounds KV columns left in this tile, relative to this thread's first column.
    seqlenk_col_limit = self.seqlen_k - n_block * self.tile_n - thr_col_offset
    if const_expr(not mask_causal and not mask_local and mask_mod is not None):
        # Block sparse case with mask_mod (backward)
        #
        # Coordinate convention: ROW → Q (m_block), COL → KV (n_block).
        # These already account for swap_AB.
        #
        # FULL blocks: mask_mod returns True for all elements, so skip it.
        # Still need seqlen bounds check (elements may be OOB on last m_block).
        # PARTIAL blocks: apply mask_mod element-wise, then seqlen bounds.
        if is_full_block:
            if const_expr(mask_seqlen):
                if seqlenk_col_limit <= 0:
                    # Entire tile is OOB for K
                    for i in cutlass.range(cute.size(acc_S.shape), unroll_full=True):
                        acc_S[i] = -cutlass.Float32.inf
                elif check_m_boundary:
                    # Last m_block: check Q and K boundaries
                    ncol = const_expr(cute.size(tScS_t2r.shape))
                    for i in cutlass.range_constexpr(ncol):
                        row_coord = tScS_t2r[i][ROW]
                        col_coord = tScS_t2r[i][COL]
                        global_q = row_coord + m_block * self.tile_m
                        global_kv = col_coord + n_block * self.tile_n
                        q_out_of_bounds = global_q >= self.seqlen_q
                        kv_out_of_bounds = global_kv >= self.seqlen_k
                        out_of_bounds = q_out_of_bounds or kv_out_of_bounds
                        acc_S[i] = -cutlass.Float32.inf if out_of_bounds else acc_S[i]
        else:
            # Partial block
            has_fastdiv = const_expr(
                fastdiv_mods is not None
                and fastdiv_mods[0] is not None
                and fastdiv_mods[1] is not None
            )
            # Only wrap indices fed to mask_mod when fastdiv divisors are available
            # (avoids OOB reads from aux_tensors).
            wrap_aux_indices = const_expr(
                has_fastdiv and mask_seqlen and const_expr(aux_tensors is not None)
            )
            batch_idx_ssa = utils.scalar_to_ssa(batch_idx, cutlass.Int32)
            head_idx_ssa = utils.scalar_to_ssa(head_idx, cutlass.Int32)
            ncol = const_expr(cute.size(tScS_t2r.shape))
            for i in cutlass.range_constexpr(ncol):
                row_coord = tScS_t2r[i][ROW]
                col_coord = tScS_t2r[i][COL]
                global_q = row_coord + m_block * self.tile_m
                global_kv = col_coord + n_block * self.tile_n
                q_idx_for_mod = global_q
                kv_idx_for_mod = global_kv
                if const_expr(wrap_aux_indices):
                    _, q_idx_for_mod = divmod(global_q, fastdiv_mods[0])
                    _, kv_idx_for_mod = divmod(global_kv, fastdiv_mods[1])
                q_idx_ssa = utils.scalar_to_ssa(q_idx_for_mod, cutlass.Int32)
                kv_idx_ssa = utils.scalar_to_ssa(kv_idx_for_mod, cutlass.Int32)
                mask_value = mask_mod(
                    batch_idx_ssa,
                    head_idx_ssa,
                    q_idx_ssa,
                    kv_idx_ssa,
                    self.seqlen_info,
                    aux_tensors,
                )
                cond = cutlass.Boolean(utils.ssa_to_scalar(mask_value))
                acc_S[i] = acc_S[i] if cond else -cutlass.Float32.inf
                if const_expr(mask_seqlen):
                    # check_m_boundary=False skips q check for non-boundary m_blocks
                    q_out_of_bounds = check_m_boundary and (global_q >= self.seqlen_q)
                    kv_out_of_bounds = global_kv >= self.seqlen_k
                    out_of_bounds = q_out_of_bounds or kv_out_of_bounds
                    acc_S[i] = -cutlass.Float32.inf if out_of_bounds else acc_S[i]
    elif const_expr(not mask_causal and not mask_local):
        # No attention mask: only K-length bounds can invalidate this tile.
        if const_expr(mask_seqlen):
            if seqlenk_col_limit <= 0:
                for i in cutlass.range(cute.size(acc_S.shape), unroll_full=True):
                    acc_S[i] = -cutlass.Float32.inf
    else:  # Causal or local
        thr_row_offset = tScS_t2r[0][ROW]
        seqlenq_row_limit = self.seqlen_q - m_block * self.tile_m - thr_row_offset
        # Diagonal offset between the Q-tile and KV-tile origins for this thread.
        causal_offset = seqlenq_row_limit - seqlenk_col_limit
        if const_expr(mask_causal):
            # tidx = cute.arch.thread_idx()[0] % 256
            # if tidx < 32:
            # cute.printf("tidx = {}, {} {}, {} {}", tidx, tScS_t2r[0][0], tScS_t2r[0][1], tScS_t2r[1][0], tScS_t2r[1][1])
            row_limit_top = causal_offset
            if const_expr(mask_seqlen):
                # If col is beyond the column limit, we want to mask out the entire
                # column, by setting row limit to be self.tile_m.
                if seqlenk_col_limit <= 0:
                    row_limit_top = self.tile_m
            r2p = True
            if const_expr(not r2p):
                for i in cutlass.range(cute.size(acc_S.shape), unroll_full=True):
                    acc_S[i] = (
                        -cutlass.Float32.inf if t0ScS_t2r[i][ROW] < row_limit_top else acc_S[i]
                    )
            else:
                num_rep = cute.size(tScS_t2r, mode=[0])  # 16 or 32
                mask_r2p_transposed(acc_S, row_limit_top, num_rep)
        else:
            # Local (sliding-window) masking: valid rows lie between top and bottom limits.
            if const_expr(self.window_size_right is not None):
                row_limit_top = causal_offset - self.window_size_right
            else:
                row_limit_top = 0
            if const_expr(self.window_size_left is not None):
                row_limit_bot = causal_offset + self.window_size_left
            if const_expr(mask_seqlen):
                if seqlenk_col_limit <= 0:
                    row_limit_top = self.tile_m
            for i in cutlass.range(cute.size(acc_S.shape), unroll_full=True):
                row_idx = t0ScS_t2r[i][ROW]
                local_mask = row_idx < row_limit_top
                if const_expr(self.window_size_left is not None):
                    local_mask |= row_idx > row_limit_bot
                acc_S[i] = -cutlass.Float32.inf if local_mask else acc_S[i]
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/mask.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 612,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/seqlen_info.py | from typing import Optional
from dataclasses import dataclass
import cutlass
import cutlass.cute as cute
from cutlass import Int32, const_expr
"""
This consolidates all the info related to sequence length. This is so that we can do all
the gmem reads once at the beginning of each tile, rather than having to repeat these reads
to compute various things like n_block_min, n_block_max, etc.
"""
@dataclass(frozen=True)
class SeqlenInfo:
    """Per-batch offset/length for a single sequence dimension.

    Consolidates the gmem reads (cu_seqlens / seqused) so they happen once at
    the beginning of each tile instead of being repeated for derived quantities.
    """

    # Start offset of this batch element in the packed (varlen) layout; 0 when not varlen.
    offset: cutlass.Int32
    # Effective sequence length for this batch element.
    seqlen: cutlass.Int32

    @staticmethod
    def create(
        batch_idx: cutlass.Int32,
        seqlen_static: cutlass.Int32,
        cu_seqlens: Optional[cute.Tensor] = None,
        seqused: Optional[cute.Tensor] = None,
    ) -> "SeqlenInfo":
        # Varlen: offset is read from the cumulative-seqlen table; otherwise 0.
        offset = 0 if const_expr(cu_seqlens is None) else cu_seqlens[batch_idx]
        # Length priority: explicit seqused > cu_seqlens difference > static length.
        if const_expr(seqused is not None):
            seqlen = seqused[batch_idx]
        elif const_expr(cu_seqlens is not None):
            seqlen = cu_seqlens[batch_idx + 1] - cu_seqlens[batch_idx]
        else:
            seqlen = seqlen_static
        return SeqlenInfo(offset, seqlen)
@dataclass(frozen=True)
class SeqlenInfoQK:
    """Per-batch offsets and lengths for both the Q and K sequence dimensions.

    Consolidates the gmem reads of cu_seqlens / seqused so they are done once
    per tile rather than re-read when computing block bounds.
    """

    # Element offsets of this batch in the packed (varlen) Q / K layouts; 0 when not varlen.
    offset_q: cutlass.Int32
    offset_k: cutlass.Int32
    # Offsets rounded to a tile_m / tile_n multiple (see create for the rounding).
    padded_offset_q: cutlass.Int32
    padded_offset_k: cutlass.Int32
    seqlen_q: cutlass.Int32
    seqlen_k: cutlass.Int32
    # Compile-time flags recording which optional tensors were provided.
    has_cu_seqlens_q: cutlass.Constexpr[bool]
    has_cu_seqlens_k: cutlass.Constexpr[bool]
    has_seqused_q: cutlass.Constexpr[bool]
    has_seqused_k: cutlass.Constexpr[bool]

    @staticmethod
    def create(
        batch_idx: cutlass.Int32,
        seqlen_q_static: cutlass.Int32,
        seqlen_k_static: cutlass.Int32,
        mCuSeqlensQ: Optional[cute.Tensor] = None,
        mCuSeqlensK: Optional[cute.Tensor] = None,
        mSeqUsedQ: Optional[cute.Tensor] = None,
        mSeqUsedK: Optional[cute.Tensor] = None,
        tile_m: cutlass.Constexpr[cutlass.Int32] = 128,
        tile_n: cutlass.Constexpr[cutlass.Int32] = 128,
    ) -> "SeqlenInfoQK":
        offset_q = 0 if const_expr(mCuSeqlensQ is None) else mCuSeqlensQ[batch_idx]
        offset_k = 0 if const_expr(mCuSeqlensK is None) else mCuSeqlensK[batch_idx]
        # Round (offset + batch_idx * tile) down to a multiple of the tile size.
        padded_offset_q = (
            0
            if const_expr(mCuSeqlensQ is None)
            else (offset_q + batch_idx * tile_m) // tile_m * tile_m
        )
        padded_offset_k = (
            0
            if const_expr(mCuSeqlensK is None)
            else (offset_k + batch_idx * tile_n) // tile_n * tile_n
        )
        # Length priority: explicit seqused > cu_seqlens difference > static length.
        if const_expr(mSeqUsedQ is not None):
            seqlen_q = mSeqUsedQ[batch_idx]
        else:
            seqlen_q = (
                seqlen_q_static
                if const_expr(mCuSeqlensQ is None)
                else mCuSeqlensQ[batch_idx + 1] - offset_q
            )
        if const_expr(mSeqUsedK is not None):
            seqlen_k = mSeqUsedK[batch_idx]
        else:
            seqlen_k = (
                seqlen_k_static
                if const_expr(mCuSeqlensK is None)
                else mCuSeqlensK[batch_idx + 1] - offset_k
            )
        # Annotations corrected from `int` to `bool` to match the Constexpr[bool]
        # dataclass fields above (local annotations are not evaluated at runtime).
        has_cu_seqlens_q: bool = mCuSeqlensQ is not None
        has_cu_seqlens_k: bool = mCuSeqlensK is not None
        has_seqused_q: bool = mSeqUsedQ is not None
        has_seqused_k: bool = mSeqUsedK is not None
        return SeqlenInfoQK(
            offset_q,
            offset_k,
            padded_offset_q,
            padded_offset_k,
            seqlen_q,
            seqlen_k,
            has_cu_seqlens_q,
            has_cu_seqlens_k,
            has_seqused_q,
            has_seqused_k,
        )

    def offset_batch_Q(
        self,
        mQ: cute.Tensor,
        batch_idx: Int32,
        dim: int,
        padded: cutlass.Constexpr[bool] = False,
    ) -> cute.Tensor:
        """Seqlen must be the first dimension of mQ"""
        if const_expr(not self.has_cu_seqlens_q):
            # Non-varlen: simply slice the batch dimension `dim` out of mQ.
            idx = (None,) * dim + (batch_idx,) + (None,) * (cute.rank(mQ) - 1 - dim)
            return mQ[idx]
        else:
            # Varlen: shift the seqlen (first) mode by this batch's offset.
            offset_q = self.offset_q if const_expr(not padded) else self.padded_offset_q
            offset = offset_q if const_expr(cute.rank(mQ.shape[0]) == 1) else (0, offset_q)
            idx = (offset,) + (0,) * (cute.rank(mQ) - 1)
            return cute.domain_offset(idx, mQ)

    def offset_batch_K(
        self,
        mK: cute.Tensor,
        batch_idx: Int32,
        dim: int,
        padded: cutlass.Constexpr[bool] = False,
    ) -> cute.Tensor:
        """Seqlen must be the first dimension of mK"""
        if const_expr(not self.has_cu_seqlens_k):
            # Non-varlen: simply slice the batch dimension `dim` out of mK.
            idx = (None,) * dim + (batch_idx,) + (None,) * (cute.rank(mK) - 1 - dim)
            return mK[idx]
        else:
            # Varlen: shift the seqlen (first) mode by this batch's offset.
            offset_k = self.offset_k if const_expr(not padded) else self.padded_offset_k
            idx = (offset_k,) + (0,) * (cute.rank(mK) - 1)
            return cute.domain_offset(idx, mK)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/seqlen_info.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Dao-AILab/flash-attention:flash_attn/cute/softmax.py | # Copyright (c) 2025, Tri Dao.
import math
import operator
from typing import Tuple
from dataclasses import dataclass
import cutlass
import cutlass.cute as cute
from cutlass import Float32
from quack import layout_utils
import flash_attn.cute.utils as utils
from quack.cute_dsl_utils import ParamsBase
from flash_attn.cute.seqlen_info import SeqlenInfoQK
@dataclass
class Softmax(ParamsBase):
    """Online-softmax state (running row max / row sum) kept in registers."""

    # Softmax scale pre-multiplied for use with exp2 (base-2 exponentials).
    scale_log2: Float32
    num_rows: cutlass.Constexpr[int]
    row_max: cute.Tensor
    row_sum: cute.Tensor
    arch: cutlass.Constexpr[int] = 80
    softmax_scale: Float32 | None = None

    @staticmethod
    def create(
        scale_log2: Float32,
        num_rows: cutlass.Constexpr[int],
        arch: cutlass.Constexpr[int] = 80,
        softmax_scale: Float32 | None = None,
    ):
        # Allocate the running-statistics registers for this thread's rows.
        row_max = cute.make_rmem_tensor(num_rows, Float32)
        row_sum = cute.make_rmem_tensor(num_rows, Float32)
        return Softmax(scale_log2, num_rows, row_max, row_sum, arch, softmax_scale)

    def reset(self) -> None:
        # Identity elements for max (-inf) and sum (0).
        self.row_max.fill(-Float32.inf)
        self.row_sum.fill(0.0)

    def _compute_row_max(
        self, acc_S_row: cute.TensorSSA, init_val: float | Float32 | None = None
    ) -> Float32:
        return utils.fmax_reduce(acc_S_row, init_val, arch=self.arch)

    def _compute_row_sum(
        self, acc_S_row_exp: cute.TensorSSA, init_val: float | Float32 | None = None
    ) -> Float32:
        return utils.fadd_reduce(acc_S_row_exp, init_val, arch=self.arch)

    @cute.jit
    def online_softmax(
        self,
        acc_S: cute.Tensor,
        is_first: cutlass.Constexpr[bool] = False,
        check_inf: cutlass.Constexpr[bool] = True,
    ) -> cute.Tensor:
        """Apply online softmax and return the row_scale to rescale O.

        :param acc_S: acc_S tensor
        :type acc_S: cute.Tensor
        :param is_first: is first n_block
        :type is_first: cutlass.Constexpr
        :param check_inf: replace a -inf row max by 0 before exponentiating
        :type check_inf: cutlass.Constexpr
        """
        # Change acc_S to M,N layout view.
        acc_S_mn = layout_utils.reshape_acc_to_mn(acc_S)
        row_scale = cute.make_fragment_like(self.row_max, Float32)
        # Hoist attribute reads out of the per-row loop.
        row_max = self.row_max
        row_sum = self.row_sum
        scale_log2 = self.scale_log2
        arch = self.arch
        # Each iteration processes one row of acc_S
        for r in cutlass.range(cute.size(row_max), unroll_full=True):
            acc_S_row = acc_S_mn[r, None].load()  # (n_block_size)
            row_max_cur = utils.fmax_reduce(
                acc_S_row,
                init_val=row_max[r] if cutlass.const_expr(not is_first) else None,
                arch=arch,
            )
            row_max_cur = cute.arch.warp_reduction_max(row_max_cur, threads_in_group=4)
            # Update row_max before changing row_max_cur to safe value for -inf
            row_max_prev = row_max[r]
            row_max[r] = row_max_cur
            if cutlass.const_expr(check_inf):
                row_max_cur = 0.0 if row_max_cur == -Float32.inf else row_max_cur
            if cutlass.const_expr(is_first):
                row_max_cur_scaled = row_max_cur * scale_log2
                acc_S_row_exp = cute.math.exp2(
                    acc_S_row * scale_log2 - row_max_cur_scaled, fastmath=True
                )
                acc_S_row_sum = utils.fadd_reduce(acc_S_row_exp, init_val=None, arch=arch)
                row_scale[r] = 1.0
            else:
                row_max_cur_scaled = row_max_cur * scale_log2
                acc_S_row_exp = cute.math.exp2(
                    acc_S_row * scale_log2 - row_max_cur_scaled, fastmath=True
                )
                # row_scale[r] = cute.math.exp2(row_max_prev * self.scale_log2 - row_max_cur_scaled)
                row_scale[r] = cute.math.exp2(
                    (row_max_prev - row_max_cur) * scale_log2, fastmath=True
                )
                acc_S_row_sum = utils.fadd_reduce(
                    acc_S_row_exp, init_val=row_sum[r] * row_scale[r], arch=arch
                )
            row_sum[r] = acc_S_row_sum
            # Write the exponentiated row back in place.
            acc_S_mn[r, None].store(acc_S_row_exp)
        return row_scale

    @cute.jit
    def finalize(
        self, final_scale: Float32 = 1.0, sink_val: Float32 | cute.Tensor | None = None
    ) -> cute.Tensor:
        """Finalize the online softmax by computing the scale and logsumexp."""
        if cutlass.const_expr(sink_val is not None and isinstance(sink_val, cute.Tensor)):
            assert cute.size(sink_val) == cute.size(self.row_sum)
        row_sum = self.row_sum
        row_max = self.row_max
        scale_log2 = self.scale_log2
        # quad reduction for row_sum as we didn't do it during each iteration of online softmax
        row_sum.store(utils.warp_reduce(row_sum.load(), operator.add, width=4))
        row_scale = cute.make_fragment_like(row_max, Float32)
        for r in cutlass.range(cute.size(row_sum), unroll_full=True):
            if cutlass.const_expr(sink_val is not None):
                # Fold the attention-sink term into the denominator.
                sink_val_cur = sink_val if not isinstance(sink_val, cute.Tensor) else sink_val[r]
                LOG2_E = math.log2(math.e)
                row_sum[r] += cute.math.exp2(
                    sink_val_cur * LOG2_E - row_max[r] * scale_log2, fastmath=True
                )
            # if row_sum is zero or nan, set acc_O_mn_row to 1.0
            acc_O_mn_row_is_zero_or_nan = row_sum[r] == 0.0 or row_sum[r] != row_sum[r]
            row_scale[r] = (
                cute.arch.rcp_approx(row_sum[r] if not acc_O_mn_row_is_zero_or_nan else 1.0)
            ) * final_scale
            row_sum_cur = row_sum[r]
            LN2 = math.log(2.0)
            # Overwrite row_sum with the logsumexp (natural log) for this row.
            row_sum[r] = (
                (row_max[r] * scale_log2 + cute.math.log2(row_sum_cur, fastmath=True)) * LN2
                if not acc_O_mn_row_is_zero_or_nan
                else -Float32.inf
            )
        return row_scale

    @cute.jit
    def rescale_O(self, acc_O: cute.Tensor, row_scale: cute.Tensor) -> None:
        """Scale each row of acc_O by the given scale tensor.

        :param acc_O: input tensor
        :type acc_O: cute.Tensor
        :param row_scale: row_scale tensor
        :type row_scale: cute.Tensor
        """
        acc_O_mn = layout_utils.reshape_acc_to_mn(acc_O)
        assert cute.size(row_scale) == cute.size(acc_O_mn, mode=[0])
        for r in cutlass.range(cute.size(row_scale), unroll_full=True):
            acc_O_mn[r, None].store(acc_O_mn[r, None].load() * row_scale[r])
@dataclass
class SoftmaxSm100(Softmax):
    """SM100 softmax variant: single row per thread, optional rescale skipping."""

    # Skip rescaling O when the max moved by less than this threshold (in log2 units).
    rescale_threshold: cutlass.Constexpr[float] = 0.0

    @staticmethod
    def create(
        scale_log2: Float32,
        rescale_threshold: cutlass.Constexpr[float] = 0.0,
        softmax_scale: Float32 | None = None,
    ):
        # SM100 path always tracks a single row per thread.
        num_rows = 1
        arch = 100
        row_max = cute.make_rmem_tensor(num_rows, Float32)
        row_sum = cute.make_rmem_tensor(num_rows, Float32)
        return SoftmaxSm100(
            scale_log2,
            num_rows,
            row_max,
            row_sum,
            arch,
            softmax_scale,
            rescale_threshold=rescale_threshold,
        )

    @cute.jit
    def update_row_max(self, acc_S_row: cute.TensorSSA, is_first: int) -> Tuple[Float32, Float32]:
        # Returns (row_max_safe, acc_scale): the -inf-sanitized new max and the
        # factor to rescale the previous accumulator by.
        if cutlass.const_expr(is_first):
            row_max_new = self._compute_row_max(acc_S_row)
            row_max_safe = row_max_new if row_max_new != -cutlass.Float32.inf else 0.0
            acc_scale = 0.0
        else:
            row_max_old = self.row_max[0]
            row_max_new = self._compute_row_max(acc_S_row, init_val=row_max_old)
            row_max_safe = row_max_new if row_max_new != -cutlass.Float32.inf else 0.0
            acc_scale_ = (row_max_old - row_max_safe) * self.scale_log2
            acc_scale = cute.math.exp2(acc_scale_, fastmath=True)
            if cutlass.const_expr(self.rescale_threshold > 0.0):
                # Small max movement: keep the old max and skip rescaling (scale = 1).
                if acc_scale_ >= -self.rescale_threshold:
                    row_max_new = row_max_old
                    row_max_safe = row_max_old
                    acc_scale = 1.0
        self.row_max[0] = row_max_new
        return row_max_safe, acc_scale

    def update_row_sum(
        self, acc_S_row_exp: cute.TensorSSA, row_scale: Float32, is_first: int = False
    ) -> None:
        # Fold this block's exp-sums into the running sum, rescaled by row_scale.
        init_val = self.row_sum[0] * row_scale if cutlass.const_expr(not is_first) else None
        # self.row_sum[0] = self._compute_row_sum(acc_S_row_exp, init_val=self.row_sum[0] * row_scale)
        self.row_sum[0] = self._compute_row_sum(acc_S_row_exp, init_val=init_val)
        # tmp = self._compute_row_sum(acc_S_row_exp)
        # self.row_sum[0] = self.row_sum[0] * row_scale + tmp

    @cute.jit
    def scale_subtract_rowmax(
        self,
        acc_S_row: cute.Tensor,
        row_max: Float32,
    ):
        # In-place: acc_S_row[i] = acc_S_row[i] * scale_log2 - row_max * scale_log2,
        # two elements at a time via packed f32x2 FMA.
        assert cute.size(acc_S_row.shape) % 2 == 0, "acc_S_row must have an even number of elements"
        row_max_scaled = row_max * self.scale_log2
        for i in cutlass.range(0, cute.size(acc_S_row.shape), 2, unroll_full=True):
            acc_S_row[i], acc_S_row[i + 1] = cute.arch.fma_packed_f32x2(
                (acc_S_row[i], acc_S_row[i + 1]),
                (self.scale_log2, self.scale_log2),
                (-row_max_scaled, -row_max_scaled),
            )

    @cute.jit
    def apply_exp2_convert(
        self,
        acc_S_row: cute.Tensor,
        acc_S_row_converted: cute.Tensor,
        ex2_emu_freq: cutlass.Constexpr[int] = 0,
        ex2_emu_res: cutlass.Constexpr[int] = 4,
        ex2_emu_start_frg: cutlass.Constexpr[int] = 0,
    ):
        # Exponentiate acc_S_row (base 2) fragment by fragment and store the
        # converted result into acc_S_row_converted. The ex2_emu_* knobs select
        # which element pairs use the emulated exp2 instead of the hardware one.
        assert cute.size(acc_S_row.shape) % 2 == 0, "acc_S_row must have an even number of elements"
        frg_tile = 32
        assert frg_tile % 2 == 0
        frg_cnt = cute.size(acc_S_row) // frg_tile
        assert cute.size(acc_S_row) % frg_tile == 0
        acc_S_row_frg = cute.logical_divide(acc_S_row, cute.make_layout(frg_tile))
        acc_S_row_converted_frg = cute.logical_divide(
            acc_S_row_converted, cute.make_layout(frg_tile)
        )
        for j in cutlass.range_constexpr(frg_cnt):
            for k in cutlass.range_constexpr(0, cute.size(acc_S_row_frg, mode=[0]), 2):
                # acc_S_row_frg[k, j] = cute.math.exp2(acc_S_row_frg[k, j], fastmath=True)
                # acc_S_row_frg[k + 1, j] = cute.math.exp2(acc_S_row_frg[k + 1, j], fastmath=True)
                if cutlass.const_expr(ex2_emu_freq == 0):
                    acc_S_row_frg[k, j] = cute.math.exp2(acc_S_row_frg[k, j], fastmath=True)
                    acc_S_row_frg[k + 1, j] = cute.math.exp2(acc_S_row_frg[k + 1, j], fastmath=True)
                else:
                    if cutlass.const_expr(
                        k % ex2_emu_freq < ex2_emu_freq - ex2_emu_res
                        or j >= frg_cnt - 1
                        or j < ex2_emu_start_frg
                    ):
                        acc_S_row_frg[k, j] = cute.math.exp2(acc_S_row_frg[k, j], fastmath=True)
                        acc_S_row_frg[k + 1, j] = cute.math.exp2(
                            acc_S_row_frg[k + 1, j], fastmath=True
                        )
                    else:
                        # acc_S_row_frg[k, j], acc_S_row_frg[k + 1, j] = utils.e2e_asm2(acc_S_row_frg[k, j], acc_S_row_frg[k + 1, j])
                        acc_S_row_frg[k, j], acc_S_row_frg[k + 1, j] = utils.ex2_emulation_2(
                            acc_S_row_frg[k, j], acc_S_row_frg[k + 1, j]
                        )
            acc_S_row_converted_frg[None, j].store(
                acc_S_row_frg[None, j].load().to(acc_S_row_converted.element_type)
            )

    @cute.jit
    def scale_apply_exp2_convert(
        self,
        acc_S_row: cute.Tensor,
        row_max: Float32,
        acc_S_row_converted: cute.Tensor,
    ):
        # Fused variant: scale + subtract row max (packed FMA), then exp2 and
        # convert to the output element type, fragment by fragment.
        assert cute.size(acc_S_row.shape) % 2 == 0, "acc_S_row must have an even number of elements"
        minus_row_max_scaled = -row_max * self.scale_log2
        for i in cutlass.range_constexpr(0, cute.size(acc_S_row.shape), 2):
            acc_S_row[i], acc_S_row[i + 1] = cute.arch.fma_packed_f32x2(
                (acc_S_row[i], acc_S_row[i + 1]),
                (self.scale_log2, self.scale_log2),
                (minus_row_max_scaled, minus_row_max_scaled),
            )
        # for i in cutlass.range_constexpr(0, cute.size(acc_S_row.shape), 2):
        #     acc_S_row[i], acc_S_row[i + 1] = cute.arch.fma_packed_f32x2(
        #         (acc_S_row[i], acc_S_row[i + 1]),
        #         (self.scale_log2, self.scale_log2),
        #         (minus_row_max_scaled, minus_row_max_scaled),
        #     )
        #     acc_S_row[i] = cute.math.exp2(acc_S_row[i], fastmath=True)
        #     acc_S_row[i + 1] = cute.math.exp2(acc_S_row[i + 1], fastmath=True)
        frg_tile = 32
        assert frg_tile % 2 == 0
        frg_cnt = cute.size(acc_S_row) // frg_tile
        assert cute.size(acc_S_row) % frg_tile == 0
        acc_S_row_frg = cute.logical_divide(acc_S_row, cute.make_layout(frg_tile))
        acc_S_row_converted_frg = cute.logical_divide(
            acc_S_row_converted, cute.make_layout(frg_tile)
        )
        for j in cutlass.range_constexpr(frg_cnt):
            for k in cutlass.range_constexpr(0, cute.size(acc_S_row_frg, mode=[0]), 2):
                # acc_S_row_frg[k, j], acc_S_row_frg[k + 1, j] = (
                #     cute.arch.fma_packed_f32x2(
                #         (acc_S_row_frg[k, j], acc_S_row_frg[k + 1, j]),
                #         (self.scale_log2, self.scale_log2),
                #         (minus_row_max_scaled, minus_row_max_scaled),
                #     )
                # )
                # acc_S_row_frg[k, j] = cute.math.exp2(acc_S_row_frg[k, j], fastmath=True)
                # acc_S_row_frg[k + 1, j] = cute.math.exp2(acc_S_row_frg[k + 1, j], fastmath=True)
                acc_S_row_frg[k, j] = cute.math.exp2(acc_S_row_frg[k, j], fastmath=True)
                acc_S_row_frg[k + 1, j] = cute.math.exp2(acc_S_row_frg[k + 1, j], fastmath=True)
            acc_S_row_converted_frg[None, j].store(
                acc_S_row_frg[None, j].load().to(acc_S_row_converted.element_type)
            )
@cute.jit
def floor_if_packed(
    q_idx,
    qhead_per_kvhead: cutlass.Constexpr[int],
) -> cute.Tensor:
    """Convert q_idx to packed format for Pack-GQA.

    Returns q_idx unchanged when qhead_per_kvhead == 1 (no packing), otherwise
    the logical row index q_idx // qhead_per_kvhead.
    NOTE(review): the ``-> cute.Tensor`` annotation looks off — the returned
    value is the same scalar/index kind as ``q_idx``; confirm before relying on it.
    """
    if cutlass.const_expr(qhead_per_kvhead == 1):
        return q_idx
    return q_idx // qhead_per_kvhead
@cute.jit
def apply_score_mod_inner(
    score_tensor,
    index_tensor,
    score_mod: cutlass.Constexpr,
    batch_idx,
    head_idx,
    softmax_scale,
    vec_size: cutlass.Constexpr,
    qk_acc_dtype: cutlass.Constexpr,
    aux_tensors,
    fastdiv_mods,
    seqlen_info: SeqlenInfoQK,
    constant_q_idx: cutlass.Constexpr,
    qhead_per_kvhead: cutlass.Constexpr[int] = 1,
    transpose_indices: cutlass.Constexpr[bool] = False,
):
    """Shared implementation for applying score modification.

    Args:
        score_tensor: The scores to modify (acc_S for flash_fwd, tSrS_t2r for sm100)
        index_tensor: Index positions (tScS for flash_fwd, tScS_t2r for sm100)
        score_mod: The score modification function to apply
        batch_idx: Batch index
        head_idx: Head index
        softmax_scale: Scale to apply
        vec_size: Vector size for processing elements
        qk_acc_dtype: Data type for accumulator
        aux_tensors: Optional aux_tensors for FlexAttention
        fastdiv_mods: Tuple of (seqlen_q_divmod, seqlen_k_divmod) for wrapping
        seqlen_info: Sequence length info
        constant_q_idx: If provided, use this constant for all q_idx values
            If None, compute q_idx per-element
        qhead_per_kvhead: Pack-GQA replication factor. Divide q_idx by this
            when greater than 1 so score mods see logical heads.
        transpose_indices: If True, swap q_idx/kv_idx in index_tensor (for bwd kernel where S is transposed)
    """
    # Index positions in the index_tensor tuple
    # Forward: index_tensor[...][0] = q_idx, index_tensor[...][1] = kv_idx
    # Backward (transposed): index_tensor[...][0] = kv_idx, index_tensor[...][1] = q_idx
    if cutlass.const_expr(transpose_indices):
        q_idx_pos = cutlass.const_expr(1)
        kv_idx_pos = cutlass.const_expr(0)
    else:
        q_idx_pos = cutlass.const_expr(0)
        kv_idx_pos = cutlass.const_expr(1)
    n_vals = cutlass.const_expr(cute.size(score_tensor.shape))
    score_vec = cute.make_rmem_tensor(vec_size, qk_acc_dtype)
    kv_idx_vec = cute.make_rmem_tensor(vec_size, cutlass.Int32)
    # SSA values for batch (constant across all elements)
    batch_idx_ssa = utils.scalar_to_ssa(batch_idx, cutlass.Int32).broadcast_to((vec_size,))
    # Handle q_idx based on whether it's constant
    q_idx_vec = cute.make_rmem_tensor(vec_size, cutlass.Int32)
    # For Pack-GQA with non-constant q_idx, we need per-element head indices
    # since a thread may process multiple query head indices
    if cutlass.const_expr(qhead_per_kvhead > 1 and constant_q_idx is None):
        head_idx_vec = cute.make_rmem_tensor(vec_size, cutlass.Int32)
    for i in cutlass.range(0, n_vals, vec_size, unroll_full=True):
        for j in cutlass.range(vec_size, unroll_full=True):
            score_vec[j] = score_tensor[i + j] * softmax_scale
            # Extract head offset from packed q_idx for Pack-GQA
            if cutlass.const_expr(qhead_per_kvhead > 1 and constant_q_idx is None):
                q_idx_packed = index_tensor[i + j][q_idx_pos]
                # Building up the logical q_head idx: final_q_head = kv_head * qhead_per_kvhead + (q_physical % qhead_per_kvhead)
                q_idx_logical = q_idx_packed // qhead_per_kvhead
                head_offset = q_idx_packed - q_idx_logical * qhead_per_kvhead
                head_idx_vec[j] = head_idx * qhead_per_kvhead + head_offset
            # If we will do loads we mod, in order to not read OOB
            if cutlass.const_expr(aux_tensors is not None and fastdiv_mods is not None):
                if cutlass.const_expr(constant_q_idx is None):
                    seqlen_q_divmod, seqlen_k_divmod = fastdiv_mods
                    q_idx_floored = floor_if_packed(
                        index_tensor[i + j][q_idx_pos], qhead_per_kvhead
                    )
                    _, q_idx_wrapped = divmod(q_idx_floored, seqlen_q_divmod)
                    q_idx_vec[j] = q_idx_wrapped
                else:
                    _, seqlen_k_divmod = fastdiv_mods
                _, kv_idx_wrapped = divmod(index_tensor[i + j][kv_idx_pos], seqlen_k_divmod)
                kv_idx_vec[j] = kv_idx_wrapped
            else:
                # No bounds checking - direct indexing
                if constant_q_idx is None:
                    q_idx_vec[j] = floor_if_packed(index_tensor[i + j][q_idx_pos], qhead_per_kvhead)
                kv_idx_vec[j] = index_tensor[i + j][kv_idx_pos]
        # Convert to SSA for score_mod call
        score_ssa = score_vec.load()
        kv_idx_ssa = kv_idx_vec.load()
        if cutlass.const_expr(constant_q_idx is None):
            q_idx_ssa = q_idx_vec.load()
        else:
            # NB we do not apply Pack-GQA division here, as constant_q_idx is assumed to already be logical
            q_idx_const = constant_q_idx
            q_idx_ssa = utils.scalar_to_ssa(q_idx_const, cutlass.Int32).broadcast_to((vec_size,))
        # Compute head_idx_ssa: per-element for Pack-GQA with non-constant q_idx, constant otherwise
        if cutlass.const_expr(qhead_per_kvhead > 1 and constant_q_idx is None):
            head_idx_ssa = head_idx_vec.load()
        else:
            head_idx_ssa = utils.scalar_to_ssa(head_idx, cutlass.Int32).broadcast_to((vec_size,))
        aux_args = []
        if cutlass.const_expr(aux_tensors is not None):
            aux_args = aux_tensors
        post_mod_scores = score_mod(
            score_ssa,
            batch_idx_ssa,
            head_idx_ssa,
            q_idx=q_idx_ssa,
            kv_idx=kv_idx_ssa,
            seqlen_info=seqlen_info,
            aux_tensors=aux_args,
        )
        # Write back modified scores
        score_vec.store(post_mod_scores)
        for j in cutlass.range(vec_size, unroll_full=True):
            score_tensor[i + j] = score_vec[j]
@cute.jit
def apply_score_mod_bwd_inner(
    grad_tensor,
    score_tensor,
    index_tensor,
    score_mod_bwd: cutlass.Constexpr,
    batch_idx,
    head_idx,
    softmax_scale,
    vec_size: cutlass.Constexpr,
    qk_acc_dtype: cutlass.Constexpr,
    aux_tensors,
    fastdiv_mods,
    seqlen_info,
    constant_q_idx: cutlass.Constexpr,
    qhead_per_kvhead: cutlass.Constexpr[int] = 1,
    transpose_indices: cutlass.Constexpr[bool] = False,
):
    """Apply backward score modification (joint graph).

    Args:
        grad_tensor: in/out: dlogits rewritten in-place with d(scaled_scores)
        score_tensor: pre-mod scores (unscaled QK tile), scaled by softmax_scale internally
        index_tensor: Index positions (same as forward)
        score_mod_bwd: The backward score modification function (joint graph)
        batch_idx: Batch index
        head_idx: Head index
        softmax_scale: Scale to apply to score_tensor
        vec_size: Vector size for processing elements
        qk_acc_dtype: Data type for accumulator
        aux_tensors: Optional aux_tensors for FlexAttention
        fastdiv_mods: Tuple of (seqlen_q_divmod, seqlen_k_divmod) for wrapping
        seqlen_info: Sequence length info
        constant_q_idx: If provided, use this constant for all q_idx values
        qhead_per_kvhead: Pack-GQA replication factor
        transpose_indices: If True, swap q_idx/kv_idx in index_tensor
    """
    # Index positions in the index_tensor tuple
    # Forward: index_tensor[...][0] = q_idx, index_tensor[...][1] = kv_idx
    # Backward (transposed): index_tensor[...][0] = kv_idx, index_tensor[...][1] = q_idx
    if cutlass.const_expr(transpose_indices):
        q_idx_pos = cutlass.const_expr(1)
        kv_idx_pos = cutlass.const_expr(0)
    else:
        q_idx_pos = cutlass.const_expr(0)
        kv_idx_pos = cutlass.const_expr(1)
    n_vals = cutlass.const_expr(cute.size(grad_tensor.shape))
    grad_vec = cute.make_fragment(vec_size, qk_acc_dtype)
    score_vec = cute.make_fragment(vec_size, qk_acc_dtype)
    kv_idx_vec = cute.make_fragment(vec_size, cutlass.Int32)
    batch_idx_ssa = utils.scalar_to_ssa(batch_idx, cutlass.Int32).broadcast_to((vec_size,))
    q_idx_vec = cute.make_fragment(vec_size, cutlass.Int32)
    # For Pack-GQA with non-constant q_idx, we need per-element head indices
    if cutlass.const_expr(qhead_per_kvhead > 1 and constant_q_idx is None):
        head_idx_vec = cute.make_fragment(vec_size, cutlass.Int32)
    for i in cutlass.range(0, n_vals, vec_size, unroll_full=True):
        for j in cutlass.range(vec_size, unroll_full=True):
            grad_vec[j] = grad_tensor[i + j]
            # Scale score so joint graph sees same value as forward score_mod
            score_vec[j] = score_tensor[i + j] * softmax_scale
            if cutlass.const_expr(qhead_per_kvhead > 1 and constant_q_idx is None):
                q_idx_packed = index_tensor[i + j][q_idx_pos]
                q_idx_logical = q_idx_packed // qhead_per_kvhead
                head_offset = q_idx_packed - q_idx_logical * qhead_per_kvhead
                head_idx_vec[j] = head_idx * qhead_per_kvhead + head_offset
            if cutlass.const_expr(aux_tensors is not None and fastdiv_mods is not None):
                if cutlass.const_expr(constant_q_idx is None):
                    seqlen_q_divmod, seqlen_k_divmod = fastdiv_mods
                    q_idx_floored = floor_if_packed(
                        index_tensor[i + j][q_idx_pos], qhead_per_kvhead
                    )
                    _, q_idx_wrapped = divmod(q_idx_floored, seqlen_q_divmod)
                    q_idx_vec[j] = q_idx_wrapped
                else:
                    _, seqlen_k_divmod = fastdiv_mods
                _, kv_idx_wrapped = divmod(index_tensor[i + j][kv_idx_pos], seqlen_k_divmod)
                kv_idx_vec[j] = kv_idx_wrapped
            else:
                # No bounds checking - direct indexing
                if constant_q_idx is None:
                    q_idx_vec[j] = floor_if_packed(index_tensor[i + j][q_idx_pos], qhead_per_kvhead)
                kv_idx_vec[j] = index_tensor[i + j][kv_idx_pos]
        grad_ssa = grad_vec.load()
        score_ssa = score_vec.load()
        kv_idx_ssa = kv_idx_vec.load()
        if cutlass.const_expr(constant_q_idx is None):
            q_idx_ssa = q_idx_vec.load()
        else:
            q_idx_ssa = utils.scalar_to_ssa(constant_q_idx, cutlass.Int32).broadcast_to((vec_size,))
        if cutlass.const_expr(qhead_per_kvhead > 1 and constant_q_idx is None):
            head_idx_ssa = head_idx_vec.load()
        else:
            head_idx_ssa = utils.scalar_to_ssa(head_idx, cutlass.Int32).broadcast_to((vec_size,))
        aux_args = []
        if cutlass.const_expr(aux_tensors is not None):
            aux_args = aux_tensors
        grad_out_ssa = score_mod_bwd(
            grad_ssa,
            score_ssa,
            batch_idx_ssa,
            head_idx_ssa,
            q_idx=q_idx_ssa,
            kv_idx=kv_idx_ssa,
            seqlen_info=seqlen_info,
            aux_tensors=aux_args,
        )
        # Write the modified gradients back in place.
        grad_vec.store(grad_out_ssa)
        for j in cutlass.range(vec_size, unroll_full=True):
            grad_tensor[i + j] = grad_vec[j]
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/softmax.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 526,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/cute/utils.py | # Copyright (c) 2025, Tri Dao.
import math
import hashlib
import inspect
from typing import Type, Callable, Optional, Tuple, overload
import cutlass
import cutlass.cute as cute
from cutlass import Float32, const_expr
from cutlass.cutlass_dsl import T, dsl_user_op
from cutlass._mlir.dialects import nvvm, llvm
from cutlass.cute.runtime import from_dlpack
import quack.activation
_MIXER_ATTRS = ("__vec_size__",)
# Obtained from sollya:
# fpminimax(exp(x * log(2.0)), 1, [|1,24...|],[0;1],relative);
# Minimax polynomial coefficients approximating 2**x on [0, 1], keyed by degree,
# with the constant term first (e.g. degree 1: 2**x ~ 1.0 + 0.6925 * x).
# NOTE(review): the degree-0 entry is a plain float `(1.0)`, not a 1-tuple like
# every other entry — confirm callers never iterate/index the degree-0 value.
POLY_EX2 = {
    0: (1.0),
    1: (
        1.0,
        0.922497093677520751953125,
    ),
    2: (
        1.0,
        0.6657850742340087890625,
        0.330107033252716064453125,
    ),
    3: (
        1.0,
        0.695146143436431884765625,
        0.227564394474029541015625,
        0.077119089663028717041015625,
    ),
    4: (
        1.0,
        0.693042695522308349609375,
        0.2412912547588348388671875,
        5.2225358784198760986328125e-2,
        1.3434938155114650726318359375e-2,
    ),
    5: (
        1.0,
        0.693151414394378662109375,
        0.24016360938549041748046875,
        5.5802188813686370849609375e-2,
        9.01452265679836273193359375e-3,
        1.86810153536498546600341796875e-3,
    ),
}
def _compute_base_hash(func: Callable) -> str:
"""Compute hash from source code or bytecode and closure values."""
try:
data = inspect.getsource(func).encode()
except (OSError, TypeError):
if hasattr(func, "__code__") and func.__code__ is not None:
data = func.__code__.co_code
else:
data = repr(func).encode()
hasher = hashlib.sha256(data)
if hasattr(func, "__closure__") and func.__closure__ is not None:
for cell in func.__closure__:
hasher.update(repr(cell.cell_contents).encode())
return hasher.hexdigest()
def hash_callable(
    func: Callable, mixer_attrs: Tuple[str, ...] = _MIXER_ATTRS, set_cute_hash: bool = True
) -> str:
    """Hash a callable based on the source code or bytecode and closure values.

    Fast-path: if the callable (or its __wrapped__ base) has a ``__cute_hash__``
    attribute, that value is returned immediately as the base hash, then
    metadata dunders are mixed in to produce the final dict-key hash.

    mixer_attrs: attribute names whose values (when set) are folded into the hash.
    set_cute_hash: whether or not to set func.__cute_hash__
    """
    # Resolve base hash
    if hasattr(func, "__cute_hash__"):
        base_hash = func.__cute_hash__
    else:
        # Unwrap decorated functions (e.g., cute.jit wrappers).
        base_func = getattr(func, "__wrapped__", func)
        if hasattr(base_func, "__cute_hash__"):
            base_hash = base_func.__cute_hash__
        else:
            base_hash = _compute_base_hash(base_func)
            if set_cute_hash:
                # Cache on the unwrapped function so future calls fast-path.
                base_func.__cute_hash__ = base_hash
    # Mix in mutable metadata dunders
    mixer_values = tuple(getattr(func, attr, None) for attr in mixer_attrs)
    if all(v is None for v in mixer_values):
        return base_hash
    hasher = hashlib.sha256(base_hash.encode())
    # BUG FIX: zip against the caller-supplied ``mixer_attrs`` — the values in
    # ``mixer_values`` were gathered from it. The original zipped against the
    # module-level ``_MIXER_ATTRS``, mislabeling values for custom attr tuples.
    for attr, val in zip(mixer_attrs, mixer_values):
        hasher.update(f"{attr}={val!r}".encode())
    return hasher.hexdigest()
def create_softcap_scoremod(softcap_val):
    """Build a pre-mask score-mod callback implementing logit softcapping.

    The returned jit function computes ``s * tanh(s)`` where
    ``s = score / softcap_val``.
    NOTE(review): conventional softcapping is ``softcap * tanh(score / softcap)``;
    the missing final multiply by ``softcap_val`` is presumably folded into the
    softmax scale by the caller — confirm against the kernel setup.
    """
    inv_softcap = 1.0 / softcap_val

    @cute.jit
    def scoremod_premask_fn(acc_S_SSA, batch_idx, head_idx, q_idx, kv_idx, aux_tensors):
        # s = score / softcap (multiply by the precomputed reciprocal)
        scores = acc_S_SSA * inv_softcap
        return scores * cute.math.tanh(scores, fastmath=True)

    return scoremod_premask_fn
def convert_from_dlpack(x, leading_dim, alignment=16, divisibility=1) -> cute.Tensor:
    """Import a dlpack-capable tensor as a cute.Tensor with a dynamic layout.

    The leading dimension's shape is marked dynamic (with the given
    divisibility hint) and the pointer is assumed aligned to ``alignment``.
    """
    tensor = from_dlpack(x, assumed_align=alignment)
    tensor = tensor.mark_layout_dynamic(leading_dim=leading_dim)
    tensor = tensor.mark_compact_shape_dynamic(
        mode=leading_dim, stride_order=x.dim_order(), divisibility=divisibility
    )
    return tensor
def convert_from_dlpack_leading_static(
    x, leading_dim, alignment=16, static_modes=None, stride_order=None
) -> cute.Tensor:
    """Import a dlpack tensor, keeping ``leading_dim`` (and any ``static_modes``)
    static while marking every other mode's shape dynamic."""
    order = x.dim_order() if stride_order is None else stride_order
    tensor = from_dlpack(x, assumed_align=alignment)
    for mode in range(x.ndim):
        if mode == leading_dim:
            continue  # leading dim stays static
        if static_modes is not None and mode in static_modes:
            continue  # caller-requested static mode
        tensor = tensor.mark_compact_shape_dynamic(mode=mode, stride_order=order)
    return tensor
def make_tiled_copy_A(
    copy_atom: cute.CopyAtom, tiled_mma: cute.TiledMma, swapAB: cutlass.Constexpr[bool] = False
) -> cute.TiledCopy:
    """Tiled copy for MMA operand A; with ``swapAB`` the A/B roles are
    exchanged, so operand B's tiled copy is built instead."""
    if const_expr(swapAB):
        return cute.make_tiled_copy_B(copy_atom, tiled_mma)
    else:
        return cute.make_tiled_copy_A(copy_atom, tiled_mma)
def make_tiled_copy_B(
    copy_atom: cute.CopyAtom, tiled_mma: cute.TiledMma, swapAB: cutlass.Constexpr[bool] = False
) -> cute.TiledCopy:
    """Tiled copy for MMA operand B; with ``swapAB`` the A/B roles are
    exchanged, so operand A's tiled copy is built instead."""
    if const_expr(swapAB):
        return cute.make_tiled_copy_A(copy_atom, tiled_mma)
    else:
        return cute.make_tiled_copy_B(copy_atom, tiled_mma)
def mma_make_fragment_A(
    smem: cute.Tensor, thr_mma: cute.core.ThrMma, swapAB: cutlass.Constexpr[bool] = False
) -> cute.Tensor:
    """Per-thread register fragment for MMA operand A over ``smem``; with
    ``swapAB`` the A/B roles are exchanged (builds the B fragment)."""
    if const_expr(swapAB):
        return mma_make_fragment_B(smem, thr_mma)
    else:
        return thr_mma.make_fragment_A(thr_mma.partition_A(smem))
def mma_make_fragment_B(
    smem: cute.Tensor, thr_mma: cute.core.ThrMma, swapAB: cutlass.Constexpr[bool] = False
) -> cute.Tensor:
    """Per-thread register fragment for MMA operand B over ``smem``; with
    ``swapAB`` the A/B roles are exchanged (builds the A fragment)."""
    if const_expr(swapAB):
        return mma_make_fragment_A(smem, thr_mma)
    else:
        return thr_mma.make_fragment_B(thr_mma.partition_B(smem))
def get_smem_store_atom(
    arch: cutlass.Constexpr[int], element_type: Type[cute.Numeric], transpose: bool = False
) -> cute.CopyAtom:
    """Pick the copy atom for storing register values to shared memory.

    Pre-SM90 architectures, or element types that are not 16-bit wide, use a
    plain universal copy moving two elements per copy. On SM90+ with 16-bit
    types the stmatrix (8x8, 16-bit, 4 matrices) warp-level store is used,
    optionally transposed.
    """
    if const_expr(arch < 90 or element_type.width != 16):
        return cute.make_copy_atom(
            cute.nvgpu.CopyUniversalOp(),
            element_type,
            num_bits_per_copy=2 * element_type.width,
        )
    else:
        return cute.make_copy_atom(
            cute.nvgpu.warp.StMatrix8x8x16bOp(transpose=transpose, num_matrices=4),
            element_type,
        )
@cute.jit
def warp_reduce(
    val: cute.TensorSSA | cute.Numeric,
    op: Callable,
    width: cutlass.Constexpr[int] = cute.arch.WARP_SIZE,
) -> cute.TensorSSA | cute.Numeric:
    """Butterfly-reduce ``val`` with binary ``op`` across ``width`` lanes.

    TensorSSA inputs are reduced elementwise (each element is reduced across
    lanes independently); scalar inputs are reduced via log2(width) XOR
    (butterfly) shuffles so every participating lane ends with the result.
    """
    if const_expr(isinstance(val, cute.TensorSSA)):
        # Spill the SSA vector to a register fragment so elements can be
        # reduced one by one, then reload it as an SSA value.
        res = cute.make_fragment(val.shape, val.dtype)
        res.store(val)
        for i in cutlass.range_constexpr(cute.size(val.shape)):
            res[i] = warp_reduce(res[i], op, width)
        return res.load()
    else:
        for i in cutlass.range_constexpr(int(math.log2(width))):
            val = op(val, cute.arch.shuffle_sync_bfly(val, offset=1 << i))
        return val
@dsl_user_op
def fmax(
    a: float | Float32, b: float | Float32, c: float | Float32 | None = None, *, loc=None, ip=None
) -> Float32:
    """Float32 maximum of two — or three, when ``c`` is given — values via
    ``nvvm.fmax`` (the 3-input form maps to the hardware 3-input max)."""
    from cutlass import CUDA_VERSION
    # * NVVM call based on nvvm version
    if CUDA_VERSION.major == 12 and CUDA_VERSION.minor == 9:
        # Old API: requires explicit result type as first positional argument
        return Float32(
            nvvm.fmax(
                T.f32(),
                Float32(a).ir_value(loc=loc, ip=ip),
                Float32(b).ir_value(loc=loc, ip=ip),
                c=Float32(c).ir_value(loc=loc, ip=ip) if c is not None else None,
                loc=loc,
                ip=ip,
            )
        )
    else:
        # New API: infers result type automatically
        return Float32(
            nvvm.fmax(
                Float32(a).ir_value(loc=loc, ip=ip),
                Float32(b).ir_value(loc=loc, ip=ip),
                c=Float32(c).ir_value(loc=loc, ip=ip) if c is not None else None,
                loc=loc,
                ip=ip,
            )
        )
@cute.jit
def fmax_reduce(
    x: cute.TensorSSA, init_val: float | Float32 | None = None, arch: cutlass.Constexpr[int] = 80
) -> Float32:
    """Horizontal max-reduce of ``x`` (optionally seeded with ``init_val``).

    Uses a 4-way unrolled tree of 2-input maxes in general; on SM100+ when the
    element count is a multiple of 8, forces the 3-input max instruction.
    NOTE(review): the first branch indexes res[0..3] unconditionally, so it
    assumes cute.size(x.shape) is at least 4 (and a multiple of 4) — confirm
    callers guarantee this.
    """
    if const_expr(arch < 100 or cute.size(x.shape) % 8 != 0):
        # if const_expr(init_val is None):
        #     init_val = -cutlass.Float32.if
        # return x.reduce(cute.ReductionOp.MAX, init_val, 0)
        res = cute.make_fragment(x.shape, Float32)
        res.store(x)
        # local_max = [res[0], res[1]]
        # for i in cutlass.range_constexpr(2, cute.size(x.shape), 2):
        #     local_max[0] = fmax(local_max[0], res[i + 0])
        #     local_max[1] = fmax(local_max[1], res[i + 1])
        # local_max[0] = fmax(local_max[0], local_max[1])
        # return local_max[0] if const_expr(init_val is None) else fmax(local_max[0], init_val)
        # Four independent accumulators to expose instruction-level parallelism.
        local_max = [res[0], res[1], res[2], res[3]]
        for i in cutlass.range_constexpr(4, cute.size(x.shape), 4):
            local_max[0] = fmax(local_max[0], res[i + 0])
            local_max[1] = fmax(local_max[1], res[i + 1])
            local_max[2] = fmax(local_max[2], res[i + 2])
            local_max[3] = fmax(local_max[3], res[i + 3])
        local_max[0] = fmax(local_max[0], local_max[1])
        local_max[2] = fmax(local_max[2], local_max[3])
        local_max[0] = fmax(local_max[0], local_max[2])
        return local_max[0] if const_expr(init_val is None) else fmax(local_max[0], init_val)
    else:
        # [2025-06-15] x.reduce only seems to use 50% 3-input max and 50% 2-input max
        # We instead force the 3-input max.
        res = cute.make_fragment(x.shape, Float32)
        res.store(x)
        local_max_0 = (
            fmax(init_val, res[0], res[1])
            if const_expr(init_val is not None)
            else fmax(res[0], res[1])
        )
        local_max = [
            local_max_0,
            fmax(res[2], res[3]),
            fmax(res[4], res[5]),
            fmax(res[6], res[7]),
        ]
        for i in cutlass.range_constexpr(8, cute.size(x.shape), 8):
            local_max[0] = fmax(local_max[0], res[i], res[i + 1])
            local_max[1] = fmax(local_max[1], res[i + 2], res[i + 3])
            local_max[2] = fmax(local_max[2], res[i + 4], res[i + 5])
            local_max[3] = fmax(local_max[3], res[i + 6], res[i + 7])
        local_max[0] = fmax(local_max[0], local_max[1])
        return fmax(local_max[0], local_max[2], local_max[3])
@cute.jit
def fadd_reduce(
    x: cute.TensorSSA, init_val: float | Float32 | None = None, arch: cutlass.Constexpr[int] = 80
) -> Float32:
    """Horizontal sum-reduce of ``x`` (optionally seeded with ``init_val``).

    Falls back to the generic ``x.reduce`` in general; on SM100+ when the
    element count is a multiple of 8, uses packed f32x2 adds with four
    independent accumulator pairs for instruction-level parallelism.
    """
    if const_expr(arch < 100 or cute.size(x.shape) % 8 != 0):
        if const_expr(init_val is None):
            init_val = Float32.zero
        return x.reduce(cute.ReductionOp.ADD, init_val, 0)
        # res = cute.make_fragment(x.shape, Float32)
        # res.store(x)
        # local_sum = [res[0], res[1], res[2], res[3]]
        # for i in cutlass.range_constexpr(4, cute.size(x.shape), 4):
        #     local_sum[0] += res[i + 0]
        #     local_sum[1] += res[i + 1]
        #     local_sum[2] += res[i + 2]
        #     local_sum[3] += res[i + 3]
        # local_sum[0] += local_sum[1]
        # local_sum[2] += local_sum[3]
        # local_sum[0] += local_sum[2]
        # return local_sum[0] if const_expr(init_val is None) else local_sum[0] + init_val
    else:
        res = cute.make_fragment(x.shape, Float32)
        res.store(x)
        # Seed lane 0 of the first pair with init_val (other lane gets 0.0).
        local_sum_0 = (
            cute.arch.add_packed_f32x2((init_val, 0.0), (res[0], res[1]))
            # cute.arch.add_packed_f32x2((init_val / 2, init_val / 2), (res[0], res[1]))
            if const_expr(init_val is not None)
            else (res[0], res[1])
        )
        local_sum = [local_sum_0, (res[2], res[3]), (res[4], res[5]), (res[6], res[7])]
        for i in cutlass.range_constexpr(8, cute.size(x.shape), 8):
            local_sum[0] = cute.arch.add_packed_f32x2(local_sum[0], (res[i + 0], res[i + 1]))
            local_sum[1] = cute.arch.add_packed_f32x2(local_sum[1], (res[i + 2], res[i + 3]))
            local_sum[2] = cute.arch.add_packed_f32x2(local_sum[2], (res[i + 4], res[i + 5]))
            local_sum[3] = cute.arch.add_packed_f32x2(local_sum[3], (res[i + 6], res[i + 7]))
        local_sum[0] = cute.arch.add_packed_f32x2(local_sum[0], local_sum[1])
        local_sum[2] = cute.arch.add_packed_f32x2(local_sum[2], local_sum[3])
        local_sum[0] = cute.arch.add_packed_f32x2(local_sum[0], local_sum[2])
        # Collapse the final packed pair into a scalar.
        return local_sum[0][0] + local_sum[0][1]
@dsl_user_op
def atomic_add_fp32(a: float | Float32, gmem_ptr: cute.Pointer, *, loc=None, ip=None) -> None:
    """Atomically add ``a`` to the fp32 value at ``gmem_ptr`` (FADD atomicrmw;
    the fetched old value is discarded)."""
    # gmem_ptr_i64 = gmem_ptr.toint(loc=loc, ip=ip).ir_value()
    # # cache_hint = cutlass.Int64(0x12F0000000000000)
    # llvm.inline_asm(
    #     None,
    #     [gmem_ptr_i64, Float32(a).ir_value(loc=loc, ip=ip)],
    #     # [gmem_ptr_i64, Float32(a).ir_value(loc=loc, ip=ip), cache_hint.ir_value()],
    #     "red.global.add.f32 [$0], $1;",
    #     # "red.global.add.L2::cache_hint.f32 [$0], $1, 0x12F0000000000000;",
    #     # "red.global.add.L2::cache_hint.f32 [$0], $1, $2;",
    #     "l,f",
    #     # "l,f,l",
    #     has_side_effects=True,
    #     is_align_stack=False,
    #     asm_dialect=llvm.AsmDialect.AD_ATT,
    # )
    nvvm.atomicrmw(
        res=T.f32(), op=nvvm.AtomicOpKind.FADD, ptr=gmem_ptr.llvm_ptr, a=Float32(a).ir_value()
    )
@dsl_user_op
def elem_pointer(x: cute.Tensor, coord: cute.Coord, *, loc=None, ip=None) -> cute.Pointer:
    """Pointer to the element of ``x`` at logical coordinate ``coord``."""
    return x.iterator + cute.crd2idx(coord, x.layout, loc=loc, ip=ip)
@cute.jit
def predicate_k(tAcA: cute.Tensor, limit: cutlass.Int32) -> cute.Tensor:
    """Build a boolean fragment predicating the k dimension of ``tAcA``.

    Each entry is True when the element's k-coordinate (index [1] of its
    coordinate) is below ``limit``. The fragment's middle mode has stride 0,
    broadcasting one predicate per (rest_v, rest_k) pair across it.
    """
    # Only compute predicates for the "k" dimension. For the mn dimension, we will use "if"
    tApA = cute.make_fragment(
        cute.make_layout(
            (cute.size(tAcA, mode=[0, 1]), cute.size(tAcA, mode=[1]), cute.size(tAcA, mode=[2])),
            stride=(cute.size(tAcA, mode=[2]), 0, 1),
        ),
        cutlass.Boolean,
    )
    for rest_v in cutlass.range_constexpr(tApA.shape[0]):
        for rest_k in cutlass.range_constexpr(tApA.shape[2]):
            # Compare the k-coordinate against the bound.
            tApA[rest_v, 0, rest_k] = cute.elem_less(tAcA[(0, rest_v), 0, rest_k][1], limit)
    return tApA
def canonical_warp_group_idx(sync: bool = True) -> cutlass.Int32:
    """Index of the calling thread's 128-thread warp group; when ``sync`` is
    True the value is made warp-uniform."""
    warp_group_idx = cute.arch.thread_idx()[0] // 128
    if const_expr(sync):
        warp_group_idx = cute.arch.make_warp_uniform(warp_group_idx)
    return warp_group_idx
# @dsl_user_op
# def warp_vote_any_lt(a: float | Float32, b: float | Float32, *, loc=None, ip=None) -> cutlass.Boolean:
# mask = cutlass.Int32(-1)
# return cutlass.Boolean(
# llvm.inline_asm(
# T.i32(),
# [Float32(a).ir_value(loc=loc, ip=ip), Float32(b).ir_value(loc=loc, ip=ip), mask.ir_value(loc=loc, ip=ip)],
# ".pred p1, p2;\n"
# "setp.lt.f32 p1, $1, $2;\n"
# "vote.sync.any.pred p2, p1, $3;\n"
# "selp.u32 $0, 1, 0, p2;",
# # "selp.u32 $0, 1, 0, p1;",
# "=r,f,f,r",
# has_side_effects=False,
# is_align_stack=False,
# asm_dialect=llvm.AsmDialect.AD_ATT,
# )
# )
@cute.jit
def shuffle_sync(
    value: cute.Numeric,
    offset: cute.typing.Int,
    width: cutlass.Constexpr[int] = cute.arch.WARP_SIZE,
) -> cute.Numeric:
    """Warp shuffle of an arbitrary (32-bit-multiple) value by ``offset`` lanes
    within ``width``-lane segments, done 32 bits at a time."""
    assert value.width % 32 == 0, "value type must be a multiple of 32 bits"
    # 1 -> 0b11111, 2 -> 0b11110, 4 -> 0b11100, 8 -> 0b11000, 16 -> 0b10000, 32 -> 0b00000
    mask = cute.arch.WARP_SIZE - width
    clamp = cute.arch.WARP_SIZE - 1
    mask_and_clamp = mask << 8 | clamp
    # important: need stride 1 and not 0 for recast_tensor to work
    val = cute.make_rmem_tensor(cute.make_layout((1,), stride=(1,)), type(value))
    val[0] = value
    # Reinterpret as 32-bit words and shuffle each word separately.
    val_i32 = cute.recast_tensor(val, cutlass.Int32)
    for i in cutlass.range_constexpr(cute.size(val_i32)):
        val_i32[i] = cute.arch.shuffle_sync(val_i32[i], offset, mask_and_clamp=mask_and_clamp)
    return val[0]
@dsl_user_op
def shr_u32(val: cutlass.Uint32, shift: cutlass.Uint32, *, loc=None, ip=None) -> cutlass.Uint32:
    """Right-shift ``val`` by ``shift`` bits via inline PTX.

    NOTE(review): despite the u32 name, the mnemonic is ``shr.s32`` — an
    arithmetic (sign-propagating) shift. For inputs with the top bit set the
    result differs from a logical ``shr.u32``; confirm this is intentional.
    """
    return cutlass.Uint32(
        llvm.inline_asm(
            T.i32(),
            [
                cutlass.Uint32(val).ir_value(loc=loc, ip=ip),
                cutlass.Uint32(shift).ir_value(loc=loc, ip=ip),
            ],
            "shr.s32 $0, $1, $2;",
            "=r,r,r",
            has_side_effects=False,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    )
@cute.jit
def warp_prefix_sum(val: cutlass.Int32, lane: Optional[cutlass.Int32] = None) -> cutlass.Int32:
    """Inclusive prefix sum of ``val`` across warp lanes (shuffle-up scan).

    ``lane`` defaults to the calling thread's lane index; lanes below the
    shuffle offset keep their partial sum unchanged at each step.
    """
    if const_expr(lane is None):
        lane = cute.arch.lane_idx()
    # if cute.arch.thread_idx()[0] >= 128 and cute.arch.thread_idx()[0] < 128 + 32 and cute.arch.block_idx()[0] == 0: cute.printf("tidx = %d, val = %d", cute.arch.thread_idx()[0] % 32, val)
    for i in cutlass.range_constexpr(int(math.log2(cute.arch.WARP_SIZE))):
        offset = 1 << i
        # Very important that we set mask_and_clamp to 0
        partial_sum = cute.arch.shuffle_sync_up(val, offset=offset, mask_and_clamp=0)
        if lane >= offset:
            val += partial_sum
        # if cute.arch.thread_idx()[0] >= 128 and cute.arch.thread_idx()[0] < 128 + 32 and cute.arch.block_idx()[0] == 0: cute.printf("tidx = %d, partial_sum = %d, val = %d", cute.arch.thread_idx()[0] % 32, partial_sum, val)
    return val
@dsl_user_op
def cvt_f16x2_f32(
    a: float | Float32, b: float | Float32, to_dtype: Type, *, loc=None, ip=None
) -> cutlass.Int32:
    """Convert two Float32 values to a packed pair of 16-bit floats.

    Returns the pair as one Int32 word with ``a`` in the low half and ``b`` in
    the high half (operand order in the cvt matches how cvt_f16 recasts the
    destination), rounding to nearest even.
    """
    assert to_dtype in [cutlass.BFloat16, cutlass.Float16], "to_dtype must be BFloat16 or Float16"
    return cutlass.Int32(
        llvm.inline_asm(
            T.i32(),
            [Float32(a).ir_value(loc=loc, ip=ip), Float32(b).ir_value(loc=loc, ip=ip)],
            f"cvt.rn.{'bf16x2' if to_dtype is cutlass.BFloat16 else 'f16x2'}.f32 $0, $2, $1;",
            "=r,f,f",
            has_side_effects=False,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    )
@overload
def cvt_f16(src: cute.Tensor, dst: cute.Tensor) -> None: ...
@overload
def cvt_f16(src: cute.Tensor, dtype: Type[cute.Numeric]) -> cute.Tensor: ...
@cute.jit
def cvt_f16(src: cute.Tensor, dst_or_dtype):
    """Convert Float32 tensor to Float16/BFloat16 using packed 2-wide converts.

    Args:
        src: Source tensor with Float32 element type (even element count)
        dst_or_dtype: Either a destination tensor or a dtype (Float16/BFloat16)

    Returns:
        None if dst is a tensor, or a new tensor if dtype is provided
    """
    if const_expr(isinstance(dst_or_dtype, type)):
        # dtype variant: create new tensor and call the tensor variant
        dtype = dst_or_dtype
        dst = cute.make_fragment(src.shape, dtype)
        cvt_f16(src, dst)
        return dst
    else:
        # tensor variant: write to dst
        dst = dst_or_dtype
        assert cute.size(dst.shape) == cute.size(src.shape), "dst and src must have the same size"
        assert cute.size(src.shape) % 2 == 0, "src must have an even number of elements"
        assert dst.element_type in [cutlass.BFloat16, cutlass.Float16], (
            "dst must be BFloat16 or Float16"
        )
        assert src.element_type is Float32, "src must be Float32"
        # View dst as 32-bit words; each word receives two converted halves.
        dst_i32 = cute.recast_tensor(dst, cutlass.Int32)
        assert cute.size(dst_i32.shape) * 2 == cute.size(src.shape)
        for i in cutlass.range_constexpr(cute.size(dst_i32)):
            dst_i32[i] = cvt_f16x2_f32(src[2 * i], src[2 * i + 1], dst.element_type)
@dsl_user_op
@cute.jit
def evaluate_polynomial(x: Float32, poly: Tuple[Float32, ...], *, loc=None, ip=None) -> Float32:
    """Evaluate the polynomial with coefficients ``poly`` (poly[i] is the
    coefficient of x**i) at ``x`` using Horner's scheme."""
    deg = len(poly) - 1
    out = poly[deg]
    for i in cutlass.range_constexpr(deg - 1, -1, -1):
        out = out * x + poly[i]
    return out
@dsl_user_op
@cute.jit
def evaluate_polynomial_2(
    x: Float32, y: Float32, poly: Tuple[Float32, ...], *, loc=None, ip=None
) -> Tuple[Float32, Float32]:
    """Horner evaluation of the same polynomial at ``x`` and ``y`` in lockstep
    using packed f32x2 FMAs; returns (p(x), p(y))."""
    deg = len(poly) - 1
    out = (poly[deg], poly[deg])
    for i in cutlass.range_constexpr(deg - 1, -1, -1):
        out = cute.arch.fma_packed_f32x2(out, (x, y), (poly[i], poly[i]))
    return out
@dsl_user_op
def add_round_down(x: float | Float32, y: float | Float32, *, loc=None, ip=None) -> Float32:
    """``x + y`` with round-toward-minus-infinity (PTX add.rm.ftz.f32); the
    rounding mode matters for the ex2 emulation's floor extraction."""
    # There's probably a way to call llvm or nvvm to do this instead of ptx
    return cutlass.Float32(
        llvm.inline_asm(
            T.f32(),
            [Float32(x).ir_value(loc=loc, ip=ip), Float32(y).ir_value(loc=loc, ip=ip)],
            "add.rm.ftz.f32 $0, $1, $2;",
            "=f,f,f",
            has_side_effects=False,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    )
@dsl_user_op
def combine_int_frac_ex2(x_rounded: Float32, frac_ex2: Float32, *, loc=None, ip=None) -> Float32:
    """Combine the integer and fractional parts of an ex2 emulation.

    ``x_rounded`` carries floor(x) in its low bits (see ex2_emulation); those
    bits are shifted into the float exponent field and added to the raw bits
    of ``frac_ex2`` (= 2**frac(x)), yielding 2**x as a Float32.
    """
    return cutlass.Float32(
        llvm.inline_asm(
            T.f32(),
            [
                Float32(x_rounded).ir_value(loc=loc, ip=ip),
                Float32(frac_ex2).ir_value(loc=loc, ip=ip),
            ],
            "{\n\t"
            ".reg .s32 x_rounded_i, frac_ex_i, x_rounded_e, out_i;\n\t"
            "mov.b32 x_rounded_i, $1;\n\t"
            "mov.b32 frac_ex_i, $2;\n\t"
            "shl.b32 x_rounded_e, x_rounded_i, 23;\n\t"
            # add.u32 generates IMAD instruction and add.s32 generates LEA instruction
            # IMAD uses the FMA pipeline and LEA uses the ALU pipeline, afaik
            "add.s32 out_i, x_rounded_e, frac_ex_i;\n\t"
            "mov.b32 $0, out_i;\n\t"
            "}\n",
            "=f,f,f",
            has_side_effects=False,
            is_align_stack=False,
            asm_dialect=llvm.AsmDialect.AD_ATT,
        )
    )
@dsl_user_op
def ex2_emulation(x: Float32, *, poly_degree: int = 3, loc=None, ip=None) -> Float32:
    """Software 2**x: split x into integer and fractional parts, approximate
    2**frac with a minimax polynomial, then splice the integer part into the
    exponent bits. Caller must ensure x <= 127.0."""
    assert poly_degree in POLY_EX2, f"Polynomial degree {poly_degree} not supported"
    # We assume x <= 127.0
    fp32_round_int = float(2**23 + 2**22)
    x_clamped = cute.arch.fmax(x, -127.0)
    # We want to round down here, so that the fractional part is in [0, 1)
    x_rounded = add_round_down(x_clamped, fp32_round_int, loc=loc, ip=ip)
    # The integer floor of x is now in the last 8 bits of x_rounded
    # We assume the next 2 ops round to nearest even. The rounding mode is important.
    x_rounded_back = x_rounded - fp32_round_int
    x_frac = x_clamped - x_rounded_back
    x_frac_ex2 = evaluate_polynomial(x_frac, POLY_EX2[poly_degree], loc=loc, ip=ip)
    return combine_int_frac_ex2(x_rounded, x_frac_ex2, loc=loc, ip=ip)
# TODO: check that the ex2_emulation_2 produces the same SASS as the ptx version
@dsl_user_op
def ex2_emulation_2(
    x: Float32, y: Float32, *, poly_degree: int = 3, loc=None, ip=None
) -> Tuple[Float32, Float32]:
    """Two-lane version of ex2_emulation using packed f32x2 arithmetic;
    returns (2**x, 2**y). Caller must ensure x, y <= 127.0."""
    # We assume x <= 127.0 and y <= 127.0
    fp32_round_int = float(2**23 + 2**22)
    xy_clamped = (cute.arch.fmax(x, -127.0), cute.arch.fmax(y, -127.0))
    # We want to round down here, so that the fractional part is in [0, 1)
    xy_rounded = cute.arch.add_packed_f32x2(xy_clamped, (fp32_round_int, fp32_round_int), rnd="rm")
    # The integer floor of x & y are now in the last 8 bits of xy_rounded
    # We want the next 2 ops to round to nearest even. The rounding mode is important.
    xy_rounded_back = quack.activation.sub_packed_f32x2(
        xy_rounded, (fp32_round_int, fp32_round_int)
    )
    xy_frac = quack.activation.sub_packed_f32x2(xy_clamped, xy_rounded_back)
    xy_frac_ex2 = evaluate_polynomial_2(*xy_frac, POLY_EX2[poly_degree], loc=loc, ip=ip)
    x_out = combine_int_frac_ex2(xy_rounded[0], xy_frac_ex2[0], loc=loc, ip=ip)
    y_out = combine_int_frac_ex2(xy_rounded[1], xy_frac_ex2[1], loc=loc, ip=ip)
    return x_out, y_out
@dsl_user_op
def e2e_asm2(x: Float32, y: Float32, *, loc=None, ip=None) -> Tuple[Float32, Float32]:
    """Hand-written PTX equivalent of ex2_emulation_2 (degree-3 polynomial):
    computes (2**x, 2**y) for x, y >= -127 using packed f32x2 ops in one asm
    block. The hex immediates are the POLY_EX2[3] coefficients and the
    2**23 + 2**22 rounding constant."""
    out_f32x2 = llvm.inline_asm(
        llvm.StructType.get_literal([T.f32(), T.f32()]),
        [Float32(x).ir_value(loc=loc, ip=ip), Float32(y, loc=loc, ip=ip).ir_value()],
        "{\n\t"
        ".reg .f32 f1, f2, f3, f4, f5, f6, f7;\n\t"
        ".reg .b64 l1, l2, l3, l4, l5, l6, l7, l8, l9, l10;\n\t"
        ".reg .s32 r1, r2, r3, r4, r5, r6, r7, r8;\n\t"
        "max.ftz.f32 f1, $2, 0fC2FE0000;\n\t"
        "max.ftz.f32 f2, $3, 0fC2FE0000;\n\t"
        "mov.b64 l1, {f1, f2};\n\t"
        "mov.f32 f3, 0f4B400000;\n\t"
        "mov.b64 l2, {f3, f3};\n\t"
        "add.rm.ftz.f32x2 l7, l1, l2;\n\t"
        "sub.rn.ftz.f32x2 l8, l7, l2;\n\t"
        "sub.rn.ftz.f32x2 l9, l1, l8;\n\t"
        "mov.f32 f7, 0f3D9DF09D;\n\t"
        "mov.b64 l6, {f7, f7};\n\t"
        "mov.f32 f6, 0f3E6906A4;\n\t"
        "mov.b64 l5, {f6, f6};\n\t"
        "mov.f32 f5, 0f3F31F519;\n\t"
        "mov.b64 l4, {f5, f5};\n\t"
        "mov.f32 f4, 0f3F800000;\n\t"
        "mov.b64 l3, {f4, f4};\n\t"
        "fma.rn.ftz.f32x2 l10, l9, l6, l5;\n\t"
        "fma.rn.ftz.f32x2 l10, l10, l9, l4;\n\t"
        "fma.rn.ftz.f32x2 l10, l10, l9, l3;\n\t"
        "mov.b64 {r1, r2}, l7;\n\t"
        "mov.b64 {r3, r4}, l10;\n\t"
        "shl.b32 r5, r1, 23;\n\t"
        "add.s32 r7, r5, r3;\n\t"
        "shl.b32 r6, r2, 23;\n\t"
        "add.s32 r8, r6, r4;\n\t"
        "mov.b32 $0, r7;\n\t"
        "mov.b32 $1, r8;\n\t"
        "}\n",
        "=r,=r,f,f",
        has_side_effects=False,
        is_align_stack=False,
        asm_dialect=llvm.AsmDialect.AD_ATT,
    )
    # Unpack the two-element result struct.
    out0 = Float32(llvm.extractvalue(T.f32(), out_f32x2, [0], loc=loc, ip=ip))
    out1 = Float32(llvm.extractvalue(T.f32(), out_f32x2, [1], loc=loc, ip=ip))
    return out0, out1
@dsl_user_op
def domain_offset_aligned(
    coord: cute.Coord, tensor: cute.Tensor, *, loc=None, ip=None
) -> cute.Tensor:
    """View of ``tensor`` offset by ``coord`` whose new pointer keeps the
    original iterator's declared alignment."""
    assert isinstance(tensor.iterator, cute.Pointer)
    # We assume that applying the offset does not change the pointer alignment
    new_ptr = cute.make_ptr(
        tensor.element_type,
        elem_pointer(tensor, coord).toint(),
        tensor.memspace,
        assumed_align=tensor.iterator.alignment,
    )
    return cute.make_tensor(new_ptr, tensor.layout)
@cute.jit
def scalar_to_ssa(a: cute.Numeric, dtype) -> cute.TensorSSA:
    """Convert a scalar to a cute TensorSSA of shape (1,) and given dtype"""
    # Round-trip through a 1-element register fragment: store the scalar,
    # then load it back as an SSA vector value.
    vec = cute.make_fragment(1, dtype)
    vec[0] = a
    return vec.load()
def ssa_to_scalar(val):
    """Inverse of ``scalar_to_ssa``: pull the lone element out of a
    length-1 vector value (kept as a function to mirror that API)."""
    first = 0
    return val[first]
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/cute/utils.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 604,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Dao-AILab/flash-attention:flash_attn/utils/testing.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
import math
from typing import Optional
import torch
from einops import rearrange, repeat
from flash_attn.bert_padding import pad_input, unpad_input
def generate_random_padding_mask(max_seqlen, batch_size, device, mode="random", zero_lengths=False):
    """Build a (batch_size, max_seqlen) boolean mask with True marking valid tokens.

    mode="full" keeps every position; "random" draws lengths from
    [max_seqlen-20, max_seqlen] (lower-bounded at 1, or 0 if zero_lengths);
    "third" draws from [max_seqlen//3, max_seqlen]. With zero_lengths, every
    5th batch row and the last row are forced to length 0.
    """
    assert mode in ["full", "random", "third"]
    if mode == "full":
        lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32)
    elif mode == "random":
        low = max(0 if zero_lengths else 1, max_seqlen - 20)
        lengths = torch.randint(low, max_seqlen + 1, (batch_size, 1), device=device)
    elif mode == "third":
        lengths = torch.randint(max_seqlen // 3, max_seqlen + 1, (batch_size, 1), device=device)
    if zero_lengths:
        # Generate zero-lengths every 5 batches and the last batch.
        for row in range(batch_size):
            if row % 5 == 0:
                lengths[row] = 0
        lengths[-1] = 0
    # Position j of row b is valid iff j < lengths[b].
    positions = torch.arange(max_seqlen, device=device).unsqueeze(0).expand(batch_size, max_seqlen)
    return positions < lengths
def generate_qkv(
    q, k, v, query_padding_mask=None, key_padding_mask=None, qv=None, kvpacked=False, qkvpacked=False,
    query_unused_mask=None, key_unused_mask=None,
):
    """Produce unpadded ("varlen") views of q/k/v plus re-padding closures for tests.

    Arguments:
        q: (batch_size, seqlen_q, nheads, d)
        k: (batch_size, seqlen_k, nheads_k, d)
        v: (batch_size, seqlen_k, nheads_k, d_v)
        query_padding_mask: (batch_size, seqlen), bool
        key_padding_mask: (batch_size, seqlen), bool

    Returns depend on packing: qkvpacked -> (qkv_unpad, cu_seqlens_q,
    max_seqlen_q, qkv, output_pad_fn, dqkv_pad_fn); kvpacked -> q/kv variants
    plus both cu_seqlens/max_seqlens and pad fns; otherwise the fully unpacked
    tuple including qv, seqused_q/k, and per-tensor pad fns. All returned
    tensors are detached with requires_grad re-enabled.
    """
    assert not (kvpacked and qkvpacked)
    batch_size, seqlen_q, nheads, d = q.shape
    d_v = v.shape[-1]
    _, seqlen_k, nheads_k, _ = k.shape
    assert k.shape == (batch_size, seqlen_k, nheads_k, d)
    assert v.shape == (batch_size, seqlen_k, nheads_k, d_v)
    if query_unused_mask is not None or key_unused_mask is not None:
        # unused-token masks are only supported in the fully unpacked path
        assert not kvpacked
        assert not qkvpacked
    if query_padding_mask is not None:
        q_unpad, indices_q, cu_seqlens_q, max_seqlen_q, seqused_q = unpad_input(
            q, query_padding_mask, query_unused_mask
        )
        # closure re-pads a (total_q, ...) output back to (b, s, ...)
        output_pad_fn = lambda output_unpad: pad_input(
            output_unpad, indices_q, batch_size, seqlen_q
        )
        qv_unpad = rearrange(qv, "b s ... -> (b s) ...")[indices_q] if qv is not None else None
    else:
        # No padding: flatten and synthesize uniform cu_seqlens.
        q_unpad = rearrange(q, "b s h d -> (b s) h d")
        cu_seqlens_q = torch.arange(
            0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q_unpad.device
        )
        seqused_q = None
        max_seqlen_q = seqlen_q
        output_pad_fn = lambda output_unpad: rearrange(
            output_unpad, "(b s) h d -> b s h d", b=batch_size
        )
        qv_unpad = rearrange(qv, "b s ... -> (b s) ...") if qv is not None else None
    if key_padding_mask is not None:
        k_unpad, indices_k, cu_seqlens_k, max_seqlen_k, seqused_k = unpad_input(
            k, key_padding_mask, key_unused_mask
        )
        v_unpad, *rest = unpad_input(v, key_padding_mask, key_unused_mask)
    else:
        k_unpad = rearrange(k, "b s h d -> (b s) h d")
        v_unpad = rearrange(v, "b s h d -> (b s) h d")
        cu_seqlens_k = torch.arange(
            0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=k_unpad.device
        )
        seqused_k = None
        max_seqlen_k = seqlen_k
    if qkvpacked:
        # NOTE(review): with both masks None this compares None == None and
        # calls .all() on a bool — would raise; presumably callers always pass
        # masks (or identical ones) in this mode. Confirm.
        assert (query_padding_mask == key_padding_mask).all()
        assert nheads == nheads_k
        qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1)
        qkv = torch.stack([q, k, v], dim=2)
        if query_padding_mask is not None:
            dqkv_pad_fn = lambda dqkv_unpad: pad_input(dqkv_unpad, indices_q, batch_size, seqlen_q)
        else:
            dqkv_pad_fn = lambda dqkv_unpad: rearrange(
                dqkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            qkv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            max_seqlen_q,
            qkv.detach().requires_grad_(),
            output_pad_fn,
            dqkv_pad_fn,
        )
    elif kvpacked:
        kv_unpad = torch.stack([k_unpad, v_unpad], dim=1)
        kv = torch.stack([k, v], dim=2)
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dkv_pad_fn = lambda dkv_unpad: pad_input(dkv_unpad, indices_k, batch_size, seqlen_k)
        else:
            dkv_pad_fn = lambda dkv_unpad: rearrange(
                dkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            q_unpad.detach().requires_grad_(),
            kv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            kv.detach().requires_grad_(),
            output_pad_fn,
            dq_pad_fn,
            dkv_pad_fn,
        )
    else:
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dk_pad_fn = lambda dk_unpad: pad_input(dk_unpad, indices_k, batch_size, seqlen_k)
        else:
            dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, "(b s) h d -> b s h d", b=batch_size)
        return (
            q_unpad.detach().requires_grad_(),
            k_unpad.detach().requires_grad_(),
            v_unpad.detach().requires_grad_(),
            qv_unpad.detach() if qv is not None else None,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            k.detach().requires_grad_(),
            v.detach().requires_grad_(),
            qv.detach() if qv is not None else None,
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        )
def construct_local_mask(
    seqlen_q,
    seqlen_k,
    window_size=(None, None),
    sink_token_length=0,
    query_padding_mask=None,
    key_padding_mask=None,
    key_leftpad=None,
    device=None,
):
    """Boolean mask (True = masked out) for sliding-window / local attention.

    Rows are aligned to the end of the key sequence (offset sk - sq). With
    window_size[0] None only the right bound applies; otherwise positions
    outside [row - window_size[0], row + window_size[1]] are masked, except
    the first ``sink_token_length`` columns, which stay visible.
    """
    row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).unsqueeze(-1)
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    if key_leftpad is not None:
        # Shift columns by each batch's left padding; padded columns get a
        # huge index so they are always masked.
        key_leftpad = key_leftpad.view(-1, 1, 1, 1)
        col_idx = col_idx.view(1, 1, 1, -1).expand(key_leftpad.shape[0], 1, 1, seqlen_k)
        col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
    if key_padding_mask is None:
        sk = seqlen_k
    else:
        sk = key_padding_mask.sum(-1).view(-1, 1, 1, 1)
    if query_padding_mask is None:
        sq = seqlen_q
    else:
        sq = query_padding_mask.sum(-1).view(-1, 1, 1, 1)
    if window_size[0] is None:
        # Right-bound-only (e.g. causal when window_size[1] == 0).
        return col_idx > row_idx + sk - sq + window_size[1]
    if key_padding_mask is None:
        sk = torch.full_like(col_idx, seqlen_k)
    right_violation = col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk)
    left_violation = torch.logical_and(
        col_idx < row_idx + sk - sq - window_size[0],
        col_idx >= sink_token_length,
    )
    return torch.logical_or(right_violation, left_violation)
def construct_chunk_mask(
    seqlen_q,
    seqlen_k,
    attention_chunk,
    query_padding_mask=None,
    key_padding_mask=None,
    key_leftpad=None,
    device=None,
):
    """Boolean mask (True = masked out) restricting each query row to its own
    ``attention_chunk``-sized block of key columns (rows aligned to the end of
    the key sequence via the sk - sq offset)."""
    row_idx = torch.arange(seqlen_q, device=device, dtype=torch.long).unsqueeze(-1)
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    if key_leftpad is not None:
        # Shift columns by each batch's left padding; padded columns get a
        # huge index so they are always masked.
        key_leftpad = key_leftpad.view(-1, 1, 1, 1)
        col_idx = col_idx.view(1, 1, 1, -1).expand(key_leftpad.shape[0], 1, 1, seqlen_k)
        col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
    if key_padding_mask is None:
        sk = torch.full_like(col_idx, seqlen_k)
    else:
        sk = key_padding_mask.sum(-1).view(-1, 1, 1, 1)
    if query_padding_mask is None:
        sq = seqlen_q
    else:
        sq = query_padding_mask.sum(-1).view(-1, 1, 1, 1)
    # Subtract the remainder instead of floordiv-then-multiply so negative
    # shifted positions still land on the correct chunk boundary.
    shifted_row = row_idx + sk - sq
    col_limit_left_chunk = shifted_row - shifted_row % attention_chunk
    return torch.logical_or(
        col_idx < col_limit_left_chunk,
        col_idx >= col_limit_left_chunk + attention_chunk,
    )
def attention_ref(
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    key_leftpad=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    qv=None,
    q_descale=None, k_descale=None, v_descale=None,
    window_size=(None, None),
    attention_chunk=0,
    sink_token_length=0,
    learnable_sink: Optional[torch.Tensor] = None,
    softcap=0.0,
    upcast=True,
    reorder_ops=False,
    intermediate_dtype=None,
):
    """
    Pure-PyTorch reference attention, used as ground truth when checking
    optimized kernels for numerical correctness.

    Arguments:
        q: (batch_size, seqlen_q, nheads, head_dim)
        k: (batch_size, seqlen_k, nheads_k, head_dim) — k/v heads are repeated to
            match q's head count (MQA/GQA), so nheads must be a multiple of nheads_k.
        v: (batch_size, seqlen_k, nheads_k, head_dim_v)
        qv: (batch_size, seqlen_q, nheads, head_dim_v) — optional extra score
            term einsum(qv, v); when given, the softmax scale uses d + dv.
        query_padding_mask: (batch_size, seqlen_q), True = valid position
        key_padding_mask: (batch_size, seqlen_k), True = valid position
        key_leftpad: (batch_size,) count of left-padding keys per batch element
        attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k),
            added to the scores after all masking, before softmax
        dropout_p: float
        dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k), True = keep
        causal: whether to apply causal masking (implemented as right window = 0)
        q_descale, k_descale, v_descale: per-kv-head dequantization scales,
            shape (batch_size, nheads_k) — TODO confirm against callers
        window_size: (left, right) local-attention window; None disables that side
        attention_chunk: if > 0, additionally restrict attention to
            diagonal-aligned chunks of this size (see construct_chunk_mask)
        sink_token_length: keys with index below this stay attendable inside
            the local-window mask
        learnable_sink: (nheads,) per-head sink logit folded into the softmax
            denominator
        softcap: if > 0, squash scores with softcap * tanh(scores / softcap)
        upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast
            output back to fp16/bf16.
        reorder_ops: whether to change the order of operations (scaling k instead of scaling q, etc.)
            without changing the math. This is to estimate the numerical error from operation
            reordering.
        intermediate_dtype: if set, round-trip the attention probabilities
            through this dtype to emulate reduced-precision intermediates
    Output:
        output: (batch_size, seqlen_q, nheads, head_dim_v)
        attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax after dropout
    """
    if causal:
        # Causal masking is local attention with a right window of 0.
        window_size = (window_size[0], 0)
    dtype_og = q.dtype
    if upcast:
        q, k, v = q.float(), k.float(), v.float()
        qv = qv.float() if qv is not None else None
    if q_descale is not None:
        # Broadcast the per-kv-head scale across each group of query heads.
        q_descale = repeat(q_descale, "b h -> b 1 (h g) 1", g=q.shape[2] // k.shape[2])
        q = (q.float() * q_descale).to(q.dtype)
        qv = (qv.float() * q_descale).to(qv.dtype) if qv is not None else None
    if k_descale is not None:
        k = (k.float() * rearrange(k_descale, "b h -> b 1 h 1")).to(dtype=k.dtype)
    if v_descale is not None:
        v = (v.float() * rearrange(v_descale, "b h -> b 1 h 1")).to(dtype=v.dtype)
    seqlen_q, seqlen_k = q.shape[1], k.shape[1]
    # MQA/GQA: replicate k/v heads so every query head has a matching kv head.
    k = repeat(k, "b s h d -> b s (h g) d", g=q.shape[2] // k.shape[2])
    v = repeat(v, "b s h d -> b s (h g) d", g=q.shape[2] // v.shape[2])
    d = q.shape[-1]
    dv = v.shape[-1]
    # With the extra qv term, scale by the combined dimension d + dv.
    softmax_scale = 1.0 / math.sqrt(d if qv is None else d + dv)
    if not reorder_ops:
        scores = torch.einsum("bthd,bshd->bhts", q * softmax_scale, k)
    else:
        # Mathematically identical; exercises a different rounding order.
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
    if qv is not None:
        scores = scores + torch.einsum("bthd,bshd->bhts", qv * softmax_scale, v)
    if softcap > 0:
        # Soft-capping keeps scores within (-softcap, softcap).
        scores = torch.tanh(scores / softcap) * softcap
    if key_padding_mask is not None:
        scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
    local_mask = None
    if window_size[0] is not None or window_size[1] is not None:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            sink_token_length,
            query_padding_mask,
            key_padding_mask,
            key_leftpad=key_leftpad,
            device=q.device,
        )
    if attention_chunk > 0:
        chunk_mask = construct_chunk_mask(
            seqlen_q,
            seqlen_k,
            attention_chunk,
            query_padding_mask,
            key_padding_mask,
            key_leftpad=key_leftpad,
            device=q.device,
        )
        # Union of disallowed positions from the window and chunk constraints.
        local_mask = torch.logical_or(local_mask, chunk_mask) if local_mask is not None else chunk_mask
    if local_mask is not None:
        scores.masked_fill_(local_mask, float("-inf"))
    if attn_bias is not None:
        scores = scores + attn_bias
    if learnable_sink is None:
        attention = torch.softmax(scores, dim=-1).to(v.dtype)
    else:
        # Softmax with an extra per-head "sink" logit in the denominator only,
        # computed in fp32 with max-subtraction for numerical stability.
        scores_fp32 = scores.to(torch.float32)
        logits_max = torch.amax(scores_fp32, dim=-1, keepdim=True)
        learnable_sink = rearrange(learnable_sink, "h -> h 1 1")
        logits_or_sinks_max = torch.maximum(learnable_sink, logits_max)
        unnormalized_scores = torch.exp(scores_fp32 - logits_or_sinks_max)
        normalizer = unnormalized_scores.sum(dim=-1, keepdim=True) + torch.exp(learnable_sink - logits_or_sinks_max)
        attention = (unnormalized_scores / normalizer).to(v.dtype)
    # We want to mask here so that the attention matrix doesn't have any NaNs
    # Otherwise we'll get NaN in dV
    if query_padding_mask is not None:
        attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    # Without this we might get NaN in dv
    if key_padding_mask is not None:
        attention = attention.masked_fill(rearrange(~key_padding_mask, "b s -> b 1 1 s"), 0.0)
    # Some rows might be completely masked out so we fill them with zero instead of NaN
    if local_mask is not None:
        attention = attention.masked_fill(torch.all(local_mask, dim=-1, keepdim=True), 0.0)
    # Inverted-dropout scaling; applied to v below rather than to the probabilities.
    dropout_scaling = 1.0 / (1 - dropout_p)
    # attention_drop = attention.masked_fill(~dropout_mask, 0.0) * dropout_scaling
    # output = torch.einsum('bhts,bshd->bthd', attention_drop , v)
    if dropout_mask is not None:
        attention_drop = attention.masked_fill(~dropout_mask, 0.0)
    else:
        attention_drop = attention
    if intermediate_dtype is not None:
        # Round-trip the probabilities through a lower-precision dtype to
        # emulate kernels that hold them in reduced precision.
        attention_drop = attention_drop.to(intermediate_dtype).to(attention_drop.dtype)
    output = torch.einsum("bhts,bshd->bthd", attention_drop, v * dropout_scaling)
    if query_padding_mask is not None:
        output.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0)
    # Note: the returned attention matrix is pre-dropout probabilities.
    return output.to(dtype=dtype_og), attention.to(dtype=dtype_og)
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "flash_attn/utils/testing.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.