Kernels
danieldk (HF Staff) committed
Commit 5d92932 · 1 Parent(s): 5c6c8ae

Add missing config

build/torch210-cxx11-cu128-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch210-cxx11-cu130-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch28-cxx11-cu126-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch28-cxx11-cu128-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch28-cxx11-cu129-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch29-cxx11-cu126-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch29-cxx11-cu128-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
build/torch29-cxx11-cu130-x86_64-linux/flash_attn_config.py ADDED
@@ -0,0 +1,7 @@
+ # Auto-generated by flash attention 3 setup.py
+ CONFIG = {'build_flags': {'FLASHATTENTION_DISABLE_BACKWARD': False, 'FLASHATTENTION_DISABLE_SPLIT': False, 'FLASHATTENTION_DISABLE_PAGEDKV': False, 'FLASHATTENTION_DISABLE_APPENDKV': False, 'FLASHATTENTION_DISABLE_LOCAL': False, 'FLASHATTENTION_DISABLE_SOFTCAP': False, 'FLASHATTENTION_DISABLE_PACKGQA': False, 'FLASHATTENTION_DISABLE_FP16': False, 'FLASHATTENTION_DISABLE_FP8': False, 'FLASHATTENTION_DISABLE_VARLEN': False, 'FLASHATTENTION_DISABLE_CLUSTER': False, 'FLASHATTENTION_DISABLE_HDIM64': False, 'FLASHATTENTION_DISABLE_HDIM96': False, 'FLASHATTENTION_DISABLE_HDIM128': False, 'FLASHATTENTION_DISABLE_HDIM192': False, 'FLASHATTENTION_DISABLE_HDIM256': False, 'FLASHATTENTION_DISABLE_SM8x': False, 'FLASHATTENTION_ENABLE_VCOLMAJOR': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF64': False, 'FLASH_ATTENTION_DISABLE_HDIMDIFF192': False}}
+
+ def show():
+     from pprint import pprint
+     pprint(CONFIG)
+
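Each generated module records the flags the flash-attention 3 extension was compiled with and exposes a small show() helper. As a minimal usage sketch, one way to inspect the recorded flags from Python is shown below; the sys.path manipulation and the specific build directory are illustrative assumptions, since in practice the module ships next to the compiled kernel and is imported from wherever that build is loaded.

# Usage sketch (assumptions noted above): inspect the recorded build flags.
import sys

# Assumed path for illustration; pick whichever build directory matches your setup.
sys.path.insert(0, "build/torch29-cxx11-cu128-x86_64-linux")

import flash_attn_config

# Pretty-print the whole CONFIG dict via the helper added in this commit.
flash_attn_config.show()

# Or read a single flag, e.g. whether the backward pass was compiled out.
print(flash_attn_config.CONFIG["build_flags"]["FLASHATTENTION_DISABLE_BACKWARD"])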