Xeno443 commited on
Commit
1d5c0c5
·
verified ·
1 Parent(s): 9758e85

Upload 4 files

Browse files
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ runscripts/attnfiles/flash_attn-2.8.3+cu128torch2.8.0cxx11abiFALSE-cp312-cp312-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
61
+ runscripts/attnfiles/sageattention-2.2.0+cu128torch2.9.0.post3-cp39-abi3-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
runscripts/attnfiles/flash_attn-2.8.3+cu128torch2.8.0cxx11abiFALSE-cp312-cp312-win_amd64.whl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:709d6663d74f96bb0304b5ba2ffee84a4dafbcb56d73d35bc90b7ca8cd44261b
3
+ size 127588011
runscripts/attnfiles/python_3.12.9_include_libs.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59b94a36903a6822c2bb7adf551e33e3aa85b079e0ca3a76ff205a907b7799e6
3
+ size 467075
runscripts/attnfiles/sageattention-2.2.0+cu128torch2.9.0.post3-cp39-abi3-win_amd64.whl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb555644b49eacb26f336bbdf0701779a24c5639a620c963f48a14ab65c2c29c
3
+ size 11963869
runscripts/attnfiles/test_sageattn.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from sageattention import sageattn
6
+ from torch.nn.attention import SDPBackend, sdpa_kernel
7
+
8
+
9
def get_rtol_atol(actual, expect):
    """Summarize elementwise error between two tensors as a printable string.

    Both inputs are upcast to float32 first so that half-precision tensors
    (as produced in this script) are compared at full precision.

    Args:
        actual: Tensor produced by the implementation under test.
        expect: Reference tensor of the same shape.

    Returns:
        A string reporting mean/max relative and absolute error, each
        formatted to three significant digits.
    """
    actual = actual.float()
    expect = expect.float()
    diff = (actual - expect).abs()
    # Floor the denominator at machine epsilon to avoid division by zero
    # where both tensors are ~0.
    eps = torch.tensor(
        torch.finfo(actual.dtype).eps, device=actual.device, dtype=actual.dtype
    )
    rdiff = diff / torch.maximum(torch.maximum(actual.abs(), expect.abs()), eps)
    return (
        f"mean_rtol={rdiff.mean().item():.3g} "
        f"max_rtol={rdiff.max().item():.3g} "
        # BUG FIX: mean_atol previously used diff.max(), duplicating max_atol
        # and over-reporting the mean absolute error.
        f"mean_atol={diff.mean().item():.3g} "
        f"max_atol={diff.max().item():.3g}"
    )
23
+
24
+
25
def main():
    """Compare SageAttention against PyTorch's reference math SDPA kernel.

    Builds random fp16 Q/K/V tensors on the GPU, runs both attention
    implementations, and prints an error summary via get_rtol_atol.
    """
    # Tensor layout: (batch, heads, sequence length, head dim).
    shape = (4, 32, 64, 128)

    query = torch.randn(*shape, device="cuda", dtype=torch.float16)
    key = torch.randn_like(query)
    value = torch.randn_like(query)
    print("q", tuple(query.shape), query.device, query.dtype)

    # 'Mathematically correct' implementation: force the pure-math SDPA
    # backend so the reference output is the unfused attention result.
    torch.backends.cuda.enable_math_sdp(True)
    with sdpa_kernel(SDPBackend.MATH):
        reference = F.scaled_dot_product_attention(query, key, value)

    sage_out = sageattn(query, key, value)
    print("sage vs math:", get_rtol_atol(sage_out, reference))
    print("The above (except max_rtol) should be < 0.05 (on RTX 20xx/30xx) or < 0.1 (on RTX 40xx/50xx)")
45
+
46
+
# Script entry point: run the comparison only when executed directly.
if __name__ == "__main__":
    main()