from typing import Optional

import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from
    (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim).
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Broadcast each KV head n_rep times along a new axis, then fold that axis
    # into the head dimension.
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
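

# Illustrative sketch (not part of the original module): a quick shape check
# confirming that repeat_kv matches torch.repeat_interleave on dim=1. The
# tensor sizes are arbitrary example values.
def _repeat_kv_example() -> None:
    x = torch.randn(2, 4, 5, 16)  # (batch, num_key_value_heads, seqlen, head_dim)
    out = repeat_kv(x, n_rep=3)
    assert out.shape == (2, 12, 5, 16)  # KV heads expanded 4 -> 12
    assert torch.equal(out, torch.repeat_interleave(x, repeats=3, dim=1))

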
def sdpa_attention_paged_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    dropout: float = 0.0,
    scaling: Optional[float] = None,
    **kwargs,
) -> tuple[torch.Tensor, None]:
    """SDPA attention forward that can source its keys/values from a paged KV cache passed via kwargs."""
    cache = kwargs.pop("cache", None)
    if cache is not None:
        # Write the new keys/values into the paged cache and read back the
        # full sequence for this layer.
        key, value = cache.update(key, value, module.layer_idx, **kwargs)
        # The cache returns flattened (seqlen, num_kv_heads, head_dim) tensors;
        # restore the 4-D (1, num_kv_heads, seqlen, head_dim) layout that
        # repeat_kv and SDPA expect.
        key = key.transpose(0, 1).unsqueeze(0)
        value = value.transpose(0, 1).unsqueeze(0)
    # For grouped-query attention, repeat the KV heads so they line up with
    # the query heads.
    if hasattr(module, "num_key_value_groups"):
        key = repeat_kv(key, module.num_key_value_groups)
        value = repeat_kv(value, module.num_key_value_groups)
    # The supplied mask already encodes causality (and any padding), so SDPA's
    # built-in causal masking is disabled below (is_causal=False).
    causal_mask = attention_mask
    # Some SDPA backends misbehave with non-contiguous inputs and a custom
    # attn_mask, so make all three tensors contiguous up front.
    query = query.contiguous()
    key = key.contiguous()
    value = value.contiguous()
    attn_output = torch.nn.functional.scaled_dot_product_attention(
        query,
        key,
        value,
        attn_mask=causal_mask,
        dropout_p=dropout,
        scale=scaling,
        is_causal=False,
    )
    # Back to (batch, seqlen, num_attention_heads, head_dim) for the
    # downstream output projection.
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, None
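

# Illustrative sketch (not part of the original module): a minimal, cache-free
# call. The toy module below is hypothetical and only supplies the attributes
# the function actually reads (layer_idx, num_key_value_groups).
def _paged_forward_example() -> None:
    class _ToyAttention(torch.nn.Module):
        layer_idx = 0
        num_key_value_groups = 2  # 8 query heads served by 4 KV heads

    batch, q_heads, kv_heads, seqlen, head_dim = 1, 8, 4, 5, 16
    query = torch.randn(batch, q_heads, seqlen, head_dim)
    key = torch.randn(batch, kv_heads, seqlen, head_dim)
    value = torch.randn(batch, kv_heads, seqlen, head_dim)
    attn_output, _ = sdpa_attention_paged_forward(
        _ToyAttention(), query, key, value, attention_mask=None
    )
    # Output comes back as (batch, seqlen, num_attention_heads, head_dim).
    assert attn_output.shape == (batch, seqlen, q_heads, head_dim)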