Build uploaded using `kernels`.
- .gitattributes +3 -0
- build/torch210-metal-aarch64-darwin/__init__.py +11 -0
- build/torch210-metal-aarch64-darwin/_custom_ops.py +117 -0
- build/torch210-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so +3 -0
- build/torch210-metal-aarch64-darwin/_ops.py +9 -0
- build/torch210-metal-aarch64-darwin/metadata.json +3 -0
- build/torch210-metal-aarch64-darwin/metal_flash_sdpa/__init__.py +26 -0
- build/torch28-metal-aarch64-darwin/__init__.py +11 -0
- build/torch28-metal-aarch64-darwin/_custom_ops.py +117 -0
- build/torch28-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so +3 -0
- build/torch28-metal-aarch64-darwin/_ops.py +9 -0
- build/torch28-metal-aarch64-darwin/metadata.json +3 -0
- build/torch28-metal-aarch64-darwin/metal_flash_sdpa/__init__.py +26 -0
- build/torch29-metal-aarch64-darwin/__init__.py +11 -0
- build/torch29-metal-aarch64-darwin/_custom_ops.py +117 -0
- build/torch29-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so +3 -0
- build/torch29-metal-aarch64-darwin/_ops.py +9 -0
- build/torch29-metal-aarch64-darwin/metadata.json +3 -0
- build/torch29-metal-aarch64-darwin/metal_flash_sdpa/__init__.py +26 -0
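The variants above are what the `kernels` library selects from at runtime, based on the installed torch version and the platform (Metal on aarch64 macOS). A minimal loading sketch, assuming the `kernels` package is installed; the repository id below is a placeholder, not taken from this commit:

```python
# Sketch: load the uploaded build through the `kernels` library.
# "<org>/<repo>" is a placeholder for this repository's id on the Hub.
from kernels import get_kernel

metal_flash_sdpa = get_kernel("<org>/<repo>")
# Resolves to one of the build/torch*-metal-aarch64-darwin variants and exposes
# the functions exported from its __init__.py.
print(metal_flash_sdpa.flash_attn_varlen_func)
```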
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+build/torch210-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-metal-aarch64-darwin/__init__.py
ADDED
@@ -0,0 +1,11 @@
+from ._custom_ops import (
+    flash_attention_varlen,
+    flash_attn_varlen_func,
+)
+from ._ops import ops
+
+__all__ = [
+    "flash_attention_varlen",
+    "flash_attn_varlen_func",
+    "ops",
+]
build/torch210-metal-aarch64-darwin/_custom_ops.py
ADDED
@@ -0,0 +1,117 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+def flash_attention_varlen(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    cu_seqlens_q: torch.Tensor,
+    cu_seqlens_k: torch.Tensor,
+    max_seqlen_q: int,
+    max_seqlen_k: int,
+    do_causal: bool = False,
+    scale: Optional[float] = None,
+    softcapping: float = 1.0,
+) -> None:
+    """
+    Flash Attention with variable-length sequences.
+
+    Args:
+        out: Output tensor of shape [total_q_tokens, num_heads, head_dim]
+        query: Query tensor of shape [total_q_tokens, num_heads, head_dim]
+        key: Key tensor of shape [total_k_tokens, num_heads_kv, head_dim]
+        value: Value tensor of shape [total_k_tokens, num_heads_kv, head_dim]
+        cu_seqlens_q: Cumulative sequence lengths for queries, shape [batch_size + 1], dtype must be torch.int32
+        cu_seqlens_k: Cumulative sequence lengths for keys, shape [batch_size + 1], dtype must be torch.int32
+        max_seqlen_q: Maximum sequence length in the query batch
+        max_seqlen_k: Maximum sequence length in the key batch
+        do_causal: Whether to apply causal masking
+        scale: Attention scale factor (default: 1/sqrt(head_dim))
+        softcapping: Softcapping value (default: 1.0, must be 1.0 for this implementation)
+
+    Note:
+        - cu_seqlens_q and cu_seqlens_k must have dtype torch.int32 for Metal compatibility
+        - Supported head dimensions: 32, 64, 72, 80, 96, 128
+        - Masks are not supported
+    """
+    if scale is None:
+        scale = query.shape[-1] ** -0.5
+
+    ops.flash_attention_varlen(
+        out,
+        query,
+        key,
+        value,
+        cu_seqlens_q,
+        cu_seqlens_k,
+        max_seqlen_q,
+        max_seqlen_k,
+        do_causal,
+        scale,
+        softcapping,
+    )
+
+
+def flash_attn_varlen_func(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    cu_seqlens_q: torch.Tensor,
+    cu_seqlens_k: torch.Tensor,
+    max_seqlen_q: int,
+    max_seqlen_k: int,
+    dropout_p: float = 0.0,
+    softmax_scale: Optional[float] = None,
+    causal: bool = False,
+    window_size: tuple = (-1, -1),
+    alibi_slopes: Optional[torch.Tensor] = None,
+    deterministic: bool = False,
+    return_attn_probs: bool = False,
+) -> torch.Tensor:
+    """
+    Flash Attention function with API compatible with the original Flash Attention.
+
+    Note: This implementation does not support:
+    - dropout
+    - window attention
+    - alibi slopes
+    - returning attention probabilities
+    """
+    if dropout_p > 0:
+        raise NotImplementedError("Dropout is not supported in this implementation")
+    if window_size != (-1, -1):
+        raise NotImplementedError("Window attention is not supported")
+    if alibi_slopes is not None:
+        raise NotImplementedError("ALiBi is not supported")
+    if return_attn_probs:
+        raise NotImplementedError("Returning attention probabilities is not supported")
+
+    # Create output tensor
+    out = torch.empty_like(q)
+
+    # Call the kernel
+    flash_attention_varlen(
+        out=out,
+        query=q,
+        key=k,
+        value=v,
+        cu_seqlens_q=cu_seqlens_q,
+        cu_seqlens_k=cu_seqlens_k,
+        max_seqlen_q=max_seqlen_q,
+        max_seqlen_k=max_seqlen_k,
+        do_causal=causal,
+        scale=softmax_scale,
+        softcapping=1.0,
+    )
+
+    return out
+
+
+__all__ = [
+    "flash_attention_varlen",
+    "flash_attn_varlen_func",
+]
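For reference, a minimal call of the wrapper above on an MPS device. This is an illustrative sketch (placeholder repository id, toy shapes), not part of the uploaded files:

```python
# Illustrative: two packed sequences of lengths 3 and 5, fp16, on Apple Silicon.
import torch
from kernels import get_kernel

flash_attn_varlen_func = get_kernel("<org>/<repo>").flash_attn_varlen_func  # placeholder id

device = "mps"
seqlens = [3, 5]
total_tokens = sum(seqlens)
num_heads, head_dim = 8, 64  # head_dim must be one of 32, 64, 72, 80, 96, 128

q = torch.randn(total_tokens, num_heads, head_dim, dtype=torch.float16, device=device)
k = torch.randn(total_tokens, num_heads, head_dim, dtype=torch.float16, device=device)
v = torch.randn(total_tokens, num_heads, head_dim, dtype=torch.float16, device=device)

# Cumulative sequence lengths [0, 3, 8]; the Metal kernel requires int32.
cu_seqlens = torch.tensor([0, 3, 8], dtype=torch.int32, device=device)

out = flash_attn_varlen_func(
    q, k, v,
    cu_seqlens_q=cu_seqlens,
    cu_seqlens_k=cu_seqlens,
    max_seqlen_q=max(seqlens),
    max_seqlen_k=max(seqlens),
    causal=True,
)
print(out.shape)  # torch.Size([8, 8, 64])
```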
build/torch210-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b862b51d85648b95df092c9981243d5e9c8a5e2e352c1d2a7ddbf7cc50adb1c
+size 732864
build/torch210-metal-aarch64-darwin/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _metal_flash_sdpa_4eea15a
+ops = torch.ops._metal_flash_sdpa_4eea15a
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_metal_flash_sdpa_4eea15a::{op_name}"
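`add_op_namespace_prefix` builds the fully qualified op name under the hashed namespace, which is the form `torch.library` helpers expect. A sketch of that use, assuming `add_op_namespace_prefix` from the `_ops` module above is in scope; the fake-implementation registration is an illustration, not something this commit performs:

```python
import torch
# Assumption: add_op_namespace_prefix is imported from the variant's _ops module
# shown above (inside the package: `from ._ops import add_op_namespace_prefix`).

qualified = add_op_namespace_prefix("flash_attention_varlen")
print(qualified)  # _metal_flash_sdpa_4eea15a::flash_attention_varlen

# Hypothetical: register a meta ("fake") implementation under that name so shape
# inference / tracing can run without dispatching to Metal. The real op writes
# into `out` in place and returns nothing, so the fake also returns None.
@torch.library.register_fake(qualified)
def _(out, query, key, value, cu_seqlens_q, cu_seqlens_k,
      max_seqlen_q, max_seqlen_k, do_causal, scale, softcapping):
    return None
```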
build/torch210-metal-aarch64-darwin/metadata.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "python-depends": []
+}
build/torch210-metal-aarch64-darwin/metal_flash_sdpa/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
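The shim above re-exports the variant's top-level `__init__.py` under a path-derived module name, so several build variants can be loaded side by side without fighting over a single entry in `sys.modules`. A small sketch of the naming scheme it relies on (paths are placeholders):

```python
import ctypes
from pathlib import Path

def unique_module_name(file_path: Path) -> str:
    # Same scheme as _import_from_path above: hex of the platform-sized,
    # unsigned hash of the absolute path.
    return "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)

# Placeholder paths: distinct variant directories map to distinct module names.
a = unique_module_name(Path("build/torch210-metal-aarch64-darwin/__init__.py"))
b = unique_module_name(Path("build/torch29-metal-aarch64-darwin/__init__.py"))
print(a, b, a != b)
```

The torch28 and torch29 variants below contain the same Python sources; only the compiled `.so` differs per torch version.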
build/torch28-metal-aarch64-darwin/__init__.py
ADDED
@@ -0,0 +1,11 @@
+from ._custom_ops import (
+    flash_attention_varlen,
+    flash_attn_varlen_func,
+)
+from ._ops import ops
+
+__all__ = [
+    "flash_attention_varlen",
+    "flash_attn_varlen_func",
+    "ops",
+]
build/torch28-metal-aarch64-darwin/_custom_ops.py
ADDED
@@ -0,0 +1,117 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+def flash_attention_varlen(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    cu_seqlens_q: torch.Tensor,
+    cu_seqlens_k: torch.Tensor,
+    max_seqlen_q: int,
+    max_seqlen_k: int,
+    do_causal: bool = False,
+    scale: Optional[float] = None,
+    softcapping: float = 1.0,
+) -> None:
+    """
+    Flash Attention with variable-length sequences.
+
+    Args:
+        out: Output tensor of shape [total_q_tokens, num_heads, head_dim]
+        query: Query tensor of shape [total_q_tokens, num_heads, head_dim]
+        key: Key tensor of shape [total_k_tokens, num_heads_kv, head_dim]
+        value: Value tensor of shape [total_k_tokens, num_heads_kv, head_dim]
+        cu_seqlens_q: Cumulative sequence lengths for queries, shape [batch_size + 1], dtype must be torch.int32
+        cu_seqlens_k: Cumulative sequence lengths for keys, shape [batch_size + 1], dtype must be torch.int32
+        max_seqlen_q: Maximum sequence length in the query batch
+        max_seqlen_k: Maximum sequence length in the key batch
+        do_causal: Whether to apply causal masking
+        scale: Attention scale factor (default: 1/sqrt(head_dim))
+        softcapping: Softcapping value (default: 1.0, must be 1.0 for this implementation)
+
+    Note:
+        - cu_seqlens_q and cu_seqlens_k must have dtype torch.int32 for Metal compatibility
+        - Supported head dimensions: 32, 64, 72, 80, 96, 128
+        - Masks are not supported
+    """
+    if scale is None:
+        scale = query.shape[-1] ** -0.5
+
+    ops.flash_attention_varlen(
+        out,
+        query,
+        key,
+        value,
+        cu_seqlens_q,
+        cu_seqlens_k,
+        max_seqlen_q,
+        max_seqlen_k,
+        do_causal,
+        scale,
+        softcapping,
+    )
+
+
+def flash_attn_varlen_func(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    cu_seqlens_q: torch.Tensor,
+    cu_seqlens_k: torch.Tensor,
+    max_seqlen_q: int,
+    max_seqlen_k: int,
+    dropout_p: float = 0.0,
+    softmax_scale: Optional[float] = None,
+    causal: bool = False,
+    window_size: tuple = (-1, -1),
+    alibi_slopes: Optional[torch.Tensor] = None,
+    deterministic: bool = False,
+    return_attn_probs: bool = False,
+) -> torch.Tensor:
+    """
+    Flash Attention function with API compatible with the original Flash Attention.
+
+    Note: This implementation does not support:
+    - dropout
+    - window attention
+    - alibi slopes
+    - returning attention probabilities
+    """
+    if dropout_p > 0:
+        raise NotImplementedError("Dropout is not supported in this implementation")
+    if window_size != (-1, -1):
+        raise NotImplementedError("Window attention is not supported")
+    if alibi_slopes is not None:
+        raise NotImplementedError("ALiBi is not supported")
+    if return_attn_probs:
+        raise NotImplementedError("Returning attention probabilities is not supported")
+
+    # Create output tensor
+    out = torch.empty_like(q)
+
+    # Call the kernel
+    flash_attention_varlen(
+        out=out,
+        query=q,
+        key=k,
+        value=v,
+        cu_seqlens_q=cu_seqlens_q,
+        cu_seqlens_k=cu_seqlens_k,
+        max_seqlen_q=max_seqlen_q,
+        max_seqlen_k=max_seqlen_k,
+        do_causal=causal,
+        scale=softmax_scale,
+        softcapping=1.0,
+    )
+
+    return out
+
+
+__all__ = [
+    "flash_attention_varlen",
+    "flash_attn_varlen_func",
+]
build/torch28-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12db93fedacb148319da10ecd7e2cc6ace886e07c7def14aff65c262a2906af7
+size 732064
build/torch28-metal-aarch64-darwin/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _metal_flash_sdpa_4eea15a
+ops = torch.ops._metal_flash_sdpa_4eea15a
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_metal_flash_sdpa_4eea15a::{op_name}"
build/torch28-metal-aarch64-darwin/metadata.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "python-depends": []
+}
build/torch28-metal-aarch64-darwin/metal_flash_sdpa/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-metal-aarch64-darwin/__init__.py
ADDED
@@ -0,0 +1,11 @@
+from ._custom_ops import (
+    flash_attention_varlen,
+    flash_attn_varlen_func,
+)
+from ._ops import ops
+
+__all__ = [
+    "flash_attention_varlen",
+    "flash_attn_varlen_func",
+    "ops",
+]
build/torch29-metal-aarch64-darwin/_custom_ops.py
ADDED
@@ -0,0 +1,117 @@
+from typing import List, Optional
+
+import torch
+
+from ._ops import ops
+
+
+def flash_attention_varlen(
+    out: torch.Tensor,
+    query: torch.Tensor,
+    key: torch.Tensor,
+    value: torch.Tensor,
+    cu_seqlens_q: torch.Tensor,
+    cu_seqlens_k: torch.Tensor,
+    max_seqlen_q: int,
+    max_seqlen_k: int,
+    do_causal: bool = False,
+    scale: Optional[float] = None,
+    softcapping: float = 1.0,
+) -> None:
+    """
+    Flash Attention with variable-length sequences.
+
+    Args:
+        out: Output tensor of shape [total_q_tokens, num_heads, head_dim]
+        query: Query tensor of shape [total_q_tokens, num_heads, head_dim]
+        key: Key tensor of shape [total_k_tokens, num_heads_kv, head_dim]
+        value: Value tensor of shape [total_k_tokens, num_heads_kv, head_dim]
+        cu_seqlens_q: Cumulative sequence lengths for queries, shape [batch_size + 1], dtype must be torch.int32
+        cu_seqlens_k: Cumulative sequence lengths for keys, shape [batch_size + 1], dtype must be torch.int32
+        max_seqlen_q: Maximum sequence length in the query batch
+        max_seqlen_k: Maximum sequence length in the key batch
+        do_causal: Whether to apply causal masking
+        scale: Attention scale factor (default: 1/sqrt(head_dim))
+        softcapping: Softcapping value (default: 1.0, must be 1.0 for this implementation)
+
+    Note:
+        - cu_seqlens_q and cu_seqlens_k must have dtype torch.int32 for Metal compatibility
+        - Supported head dimensions: 32, 64, 72, 80, 96, 128
+        - Masks are not supported
+    """
+    if scale is None:
+        scale = query.shape[-1] ** -0.5
+
+    ops.flash_attention_varlen(
+        out,
+        query,
+        key,
+        value,
+        cu_seqlens_q,
+        cu_seqlens_k,
+        max_seqlen_q,
+        max_seqlen_k,
+        do_causal,
+        scale,
+        softcapping,
+    )
+
+
+def flash_attn_varlen_func(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    cu_seqlens_q: torch.Tensor,
+    cu_seqlens_k: torch.Tensor,
+    max_seqlen_q: int,
+    max_seqlen_k: int,
+    dropout_p: float = 0.0,
+    softmax_scale: Optional[float] = None,
+    causal: bool = False,
+    window_size: tuple = (-1, -1),
+    alibi_slopes: Optional[torch.Tensor] = None,
+    deterministic: bool = False,
+    return_attn_probs: bool = False,
+) -> torch.Tensor:
+    """
+    Flash Attention function with API compatible with the original Flash Attention.
+
+    Note: This implementation does not support:
+    - dropout
+    - window attention
+    - alibi slopes
+    - returning attention probabilities
+    """
+    if dropout_p > 0:
+        raise NotImplementedError("Dropout is not supported in this implementation")
+    if window_size != (-1, -1):
+        raise NotImplementedError("Window attention is not supported")
+    if alibi_slopes is not None:
+        raise NotImplementedError("ALiBi is not supported")
+    if return_attn_probs:
+        raise NotImplementedError("Returning attention probabilities is not supported")
+
+    # Create output tensor
+    out = torch.empty_like(q)
+
+    # Call the kernel
+    flash_attention_varlen(
+        out=out,
+        query=q,
+        key=k,
+        value=v,
+        cu_seqlens_q=cu_seqlens_q,
+        cu_seqlens_k=cu_seqlens_k,
+        max_seqlen_q=max_seqlen_q,
+        max_seqlen_k=max_seqlen_k,
+        do_causal=causal,
+        scale=softmax_scale,
+        softcapping=1.0,
+    )
+
+    return out
+
+
+__all__ = [
+    "flash_attention_varlen",
+    "flash_attn_varlen_func",
+]
build/torch29-metal-aarch64-darwin/_metal_flash_sdpa_4eea15a.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d238d92f8a97aff87ad98374bab0e207af1011b5502b097eda219c2e52052ee
+size 732160
build/torch29-metal-aarch64-darwin/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _metal_flash_sdpa_4eea15a
+ops = torch.ops._metal_flash_sdpa_4eea15a
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_metal_flash_sdpa_4eea15a::{op_name}"
build/torch29-metal-aarch64-darwin/metadata.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "python-depends": []
+}
build/torch29-metal-aarch64-darwin/metal_flash_sdpa/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))