Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- Code/Baselines/flash-attention/csrc/composable_kernel/.clang-format +90 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/.clang-tidy +3 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/.gitignore +70 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/.pre-commit-config.yaml +20 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/.readthedocs.yaml +18 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/CHANGELOG.md +131 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/CITATION.cff +67 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/CMakeLists.txt +703 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/CONTRIBUTORS.md +35 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/Config.cmake.in +11 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/Dockerfile +123 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/Dockerfile.compiler +26 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/Jenkinsfile +1457 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/LICENSE +28 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/README.md +216 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/TERMINOLOGY.md +2 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/dev-requirements.txt +3 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/pyproject.toml +39 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/rbuild.ini +8 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/requirements.txt +1 -0
- Code/Baselines/flash-attention/csrc/cutlass/cmake/CTestTestfile.test.configure.cmake +43 -0
- Code/Baselines/flash-attention/csrc/flash_attn/flash_api.cpp +1485 -0
- Code/Baselines/flash-attention/csrc/ft_attention/README.md +14 -0
- Code/Baselines/flash-attention/csrc/ft_attention/cuda_bf16_fallbacks.cuh +257 -0
- Code/Baselines/flash-attention/csrc/ft_attention/cuda_bf16_wrapper.h +23 -0
- Code/Baselines/flash-attention/csrc/ft_attention/decoder_masked_multihead_attention.h +192 -0
- Code/Baselines/flash-attention/csrc/ft_attention/decoder_masked_multihead_attention_utils.h +2017 -0
- Code/Baselines/flash-attention/csrc/ft_attention/ft_attention.cpp +231 -0
- Code/Baselines/flash-attention/csrc/ft_attention/setup.py +153 -0
- Code/Baselines/flash-attention/csrc/fused_dense_lib/README.md +13 -0
- Code/Baselines/flash-attention/csrc/fused_dense_lib/fused_dense.cpp +213 -0
- Code/Baselines/flash-attention/csrc/fused_dense_lib/setup.py +42 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/fused_softmax.cpp +148 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/scaled_masked_softmax.h +528 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/scaled_masked_softmax_cuda.cu +121 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/scaled_upper_triang_masked_softmax.h +529 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/scaled_upper_triang_masked_softmax_cuda.cu +98 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/setup.py +50 -0
- Code/Baselines/flash-attention/csrc/fused_softmax/type_shim.h +20 -0
- Code/Baselines/flash-attention/csrc/layer_norm/README.md +20 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln.h +281 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_api.cpp +846 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_1024.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_1280.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_2048.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_2560.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_4096.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_5120.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_6144.cu +15 -0
- Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_7168.cu +15 -0
Code/Baselines/flash-attention/csrc/composable_kernel/.clang-format
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
Language: Cpp
|
| 3 |
+
AccessModifierOffset: 0
|
| 4 |
+
AlignAfterOpenBracket: Align
|
| 5 |
+
AlignConsecutiveAssignments: true
|
| 6 |
+
AlignConsecutiveDeclarations: false
|
| 7 |
+
AlignEscapedNewlinesLeft: true
|
| 8 |
+
AlignOperands: true
|
| 9 |
+
AlignTrailingComments: true
|
| 10 |
+
AllowAllParametersOfDeclarationOnNextLine: true
|
| 11 |
+
AllowShortBlocksOnASingleLine: true
|
| 12 |
+
AllowShortCaseLabelsOnASingleLine: true
|
| 13 |
+
AllowShortFunctionsOnASingleLine: All
|
| 14 |
+
AllowShortIfStatementsOnASingleLine: false
|
| 15 |
+
AllowShortLoopsOnASingleLine: false
|
| 16 |
+
AlwaysBreakAfterDefinitionReturnType: None
|
| 17 |
+
AlwaysBreakAfterReturnType: None
|
| 18 |
+
AlwaysBreakBeforeMultilineStrings: false
|
| 19 |
+
AlwaysBreakTemplateDeclarations: true
|
| 20 |
+
BinPackArguments: false
|
| 21 |
+
BinPackParameters: false
|
| 22 |
+
BraceWrapping:
|
| 23 |
+
AfterClass: true
|
| 24 |
+
AfterControlStatement: true
|
| 25 |
+
AfterEnum: true
|
| 26 |
+
AfterFunction: true
|
| 27 |
+
AfterNamespace: false
|
| 28 |
+
AfterObjCDeclaration: true
|
| 29 |
+
AfterStruct: true
|
| 30 |
+
AfterUnion: true
|
| 31 |
+
BeforeCatch: true
|
| 32 |
+
BeforeElse: true
|
| 33 |
+
IndentBraces: false
|
| 34 |
+
BreakBeforeBinaryOperators: None
|
| 35 |
+
BreakBeforeBraces: Custom
|
| 36 |
+
BreakBeforeTernaryOperators: true
|
| 37 |
+
BreakConstructorInitializersBeforeComma: false
|
| 38 |
+
ColumnLimit: 100
|
| 39 |
+
CommentPragmas: '^ IWYU pragma:'
|
| 40 |
+
ConstructorInitializerAllOnOneLineOrOnePerLine: true
|
| 41 |
+
ConstructorInitializerIndentWidth: 4
|
| 42 |
+
ContinuationIndentWidth: 4
|
| 43 |
+
Cpp11BracedListStyle: true
|
| 44 |
+
DerivePointerAlignment: false
|
| 45 |
+
DisableFormat: false
|
| 46 |
+
ExperimentalAutoDetectBinPacking: false
|
| 47 |
+
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
|
| 48 |
+
IncludeCategories:
|
| 49 |
+
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
|
| 50 |
+
Priority: 2
|
| 51 |
+
- Regex: '^(<|"(gtest|isl|json)/)'
|
| 52 |
+
Priority: 3
|
| 53 |
+
- Regex: '.*'
|
| 54 |
+
Priority: 1
|
| 55 |
+
IndentCaseLabels: false
|
| 56 |
+
IndentWidth: 4
|
| 57 |
+
IndentWrappedFunctionNames: false
|
| 58 |
+
KeepEmptyLinesAtTheStartOfBlocks: true
|
| 59 |
+
MacroBlockBegin: ''
|
| 60 |
+
MacroBlockEnd: ''
|
| 61 |
+
MaxEmptyLinesToKeep: 1
|
| 62 |
+
NamespaceIndentation: None
|
| 63 |
+
ObjCBlockIndentWidth: 2
|
| 64 |
+
ObjCSpaceAfterProperty: false
|
| 65 |
+
ObjCSpaceBeforeProtocolList: true
|
| 66 |
+
PenaltyBreakBeforeFirstCallParameter: 19
|
| 67 |
+
PenaltyBreakComment: 300
|
| 68 |
+
PenaltyBreakFirstLessLess: 120
|
| 69 |
+
PenaltyBreakString: 1000
|
| 70 |
+
PenaltyExcessCharacter: 1000000
|
| 71 |
+
PenaltyReturnTypeOnItsOwnLine: 60
|
| 72 |
+
PointerAlignment: Left
|
| 73 |
+
ReflowComments: true
|
| 74 |
+
SortIncludes: false
|
| 75 |
+
SpaceAfterCStyleCast: false
|
| 76 |
+
# SpaceAfterTemplateKeyword: true
|
| 77 |
+
SpaceBeforeAssignmentOperators: true
|
| 78 |
+
SpaceBeforeParens: Never
|
| 79 |
+
SpaceInEmptyParentheses: false
|
| 80 |
+
SpacesBeforeTrailingComments: 1
|
| 81 |
+
SpacesInAngles: false
|
| 82 |
+
SpacesInContainerLiterals: true
|
| 83 |
+
SpacesInCStyleCastParentheses: false
|
| 84 |
+
SpacesInParentheses: false
|
| 85 |
+
SpacesInSquareBrackets: false
|
| 86 |
+
Standard: Cpp11
|
| 87 |
+
TabWidth: 8
|
| 88 |
+
UseTab: Never
|
| 89 |
+
...
|
| 90 |
+
|
Code/Baselines/flash-attention/csrc/composable_kernel/.clang-tidy
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
CheckOptions:
|
| 2 |
+
- key: bugprone-reserved-identifier.AllowedIdentifiers
|
| 3 |
+
value: '__HIP_PLATFORM_HCC__;__HIP_PLATFORM_AMD__;__HIP_ROCclr__'
|
Code/Baselines/flash-attention/csrc/composable_kernel/.gitignore
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Compiled Object files
|
| 2 |
+
*.slo
|
| 3 |
+
*.lo
|
| 4 |
+
*.o
|
| 5 |
+
*.obj
|
| 6 |
+
|
| 7 |
+
# Precompiled Headers
|
| 8 |
+
*.gch
|
| 9 |
+
*.pch
|
| 10 |
+
*.ipch
|
| 11 |
+
|
| 12 |
+
# Compiled Dynamic libraries
|
| 13 |
+
*.so
|
| 14 |
+
*.dylib
|
| 15 |
+
*.dll
|
| 16 |
+
|
| 17 |
+
# Fortran module files
|
| 18 |
+
*.mod
|
| 19 |
+
|
| 20 |
+
# Compiled Static libraries
|
| 21 |
+
*.lai
|
| 22 |
+
*.la
|
| 23 |
+
*.a
|
| 24 |
+
*.lib
|
| 25 |
+
|
| 26 |
+
# Executables
|
| 27 |
+
*.exe
|
| 28 |
+
*.out
|
| 29 |
+
*.app
|
| 30 |
+
|
| 31 |
+
# vim tags
|
| 32 |
+
tags
|
| 33 |
+
.tags
|
| 34 |
+
.*.swp
|
| 35 |
+
|
| 36 |
+
# Editors
|
| 37 |
+
.vscode
|
| 38 |
+
|
| 39 |
+
# build-in-source directory
|
| 40 |
+
build*
|
| 41 |
+
|
| 42 |
+
# emacs temporary/backup files
|
| 43 |
+
.\#*
|
| 44 |
+
\#*\#
|
| 45 |
+
*~
|
| 46 |
+
|
| 47 |
+
# GDB temporary files
|
| 48 |
+
.gdb_history
|
| 49 |
+
install.dir*
|
| 50 |
+
|
| 51 |
+
# documentation artifacts
|
| 52 |
+
_build/
|
| 53 |
+
_images/
|
| 54 |
+
_static/
|
| 55 |
+
_templates/
|
| 56 |
+
_toc.yml
|
| 57 |
+
_doxygen/
|
| 58 |
+
docs/doxygen/html
|
| 59 |
+
docs/doxygen/xml
|
| 60 |
+
|
| 61 |
+
# JetBrains IDE
|
| 62 |
+
.idea/
|
| 63 |
+
cmake-build*/
|
| 64 |
+
build*/
|
| 65 |
+
|
| 66 |
+
# Python virtualenv
|
| 67 |
+
.venv/
|
| 68 |
+
|
| 69 |
+
# Python cache
|
| 70 |
+
__pycache__/
|
Code/Baselines/flash-attention/csrc/composable_kernel/.pre-commit-config.yaml
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
repos:
|
| 2 |
+
- repo: local
|
| 3 |
+
hooks:
|
| 4 |
+
- id: clang-format
|
| 5 |
+
name: clang-format
|
| 6 |
+
entry: clang-format-12 -i --style=file
|
| 7 |
+
language: system
|
| 8 |
+
types_or: [c++, inc]
|
| 9 |
+
- id: copyright-year-checker
|
| 10 |
+
name: copyright-year-checker
|
| 11 |
+
entry: script/check_copyright_year.sh
|
| 12 |
+
verbose: false
|
| 13 |
+
language: script
|
| 14 |
+
types: [c++]
|
| 15 |
+
- id: remove-exec-bit
|
| 16 |
+
name: Remove executable bit from non-executable files
|
| 17 |
+
entry: script/remove_exec_bit.sh
|
| 18 |
+
language: script
|
| 19 |
+
types_or: [c++, text]
|
| 20 |
+
verbose: true
|
Code/Baselines/flash-attention/csrc/composable_kernel/.readthedocs.yaml
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Read the Docs configuration file
|
| 2 |
+
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
|
| 3 |
+
|
| 4 |
+
version: 2
|
| 5 |
+
|
| 6 |
+
sphinx:
|
| 7 |
+
configuration: docs/conf.py
|
| 8 |
+
|
| 9 |
+
formats: [htmlzip, pdf, epub]
|
| 10 |
+
|
| 11 |
+
python:
|
| 12 |
+
install:
|
| 13 |
+
- requirements: docs/sphinx/requirements.txt
|
| 14 |
+
|
| 15 |
+
build:
|
| 16 |
+
os: ubuntu-22.04
|
| 17 |
+
tools:
|
| 18 |
+
python: "3.10"
|
Code/Baselines/flash-attention/csrc/composable_kernel/CHANGELOG.md
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Changelog for Composable Kernel
|
| 2 |
+
|
| 3 |
+
Documentation for Composable Kernel available at [https://rocm.docs.amd.com/projects/composable_kernel/en/latest/](https://rocm.docs.amd.com/projects/composable_kernel/en/latest/).
|
| 4 |
+
|
| 5 |
+
## Composable Kernel 1.1.0 for ROCm 6.5.0
|
| 6 |
+
|
| 7 |
+
### Added
|
| 8 |
+
|
| 9 |
+
* Added support for bf16, f32, and f16 for 2D and 3D NGCHW grouped convolution backward data
|
| 10 |
+
* Added a fully asynchronous HOST (CPU) arguments copy flow for CK grouped GEMM kernels.
|
| 11 |
+
* Added support for the GKCYX layout for grouped convolution forward (NGCHW/GKCYX/NGKHW; the number of instances in the instance factory for NGCHW/GKYXC/NGKHW has been reduced).
|
| 12 |
+
* Added support for GKCYX layout for grouped convolution forward (NGCHW/GKCYX/NGKHW).
|
| 13 |
+
* Added support for GKCYX layout for grouped convolution backward weight (NGCHW/GKCYX/NGKHW).
|
| 14 |
+
* Added support for GKCYX layout for grouped convolution backward data (NGCHW/GKCYX/NGKHW).
|
| 15 |
+
* Added support for Stream-K version of mixed fp8/bf16 GEMM
|
| 16 |
+
* Added support for Multiple D GEMM
|
| 17 |
+
* Added GEMM pipeline for microscaling (MX) FP8/FP4 data types
|
| 18 |
+
* Added support for FP16 2:4 structured sparsity to universal GEMM.
|
| 19 |
+
* Added support for Split K for grouped convolution backward data.
|
| 20 |
+
* Added logit soft-capping support for fMHA forward kernels.
|
| 21 |
+
* Added benchmarking support for tile engine GEMM.
|
| 22 |
+
* Added Ping-pong scheduler support for GEMM operation along the K dimension.
|
| 23 |
+
* Added rotating buffer feature for CK_Tile GEMM.
|
| 24 |
+
|
| 25 |
+
### Optimized
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
* Optimize the gemm multiply multiply preshuffle & lds bypass with Pack of KGroup and better instruction layout. (#2166)
|
| 29 |
+
* Added Vectorize Transpose optimization for CK Tile (#2131)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
### Fixes
|
| 33 |
+
|
| 34 |
+
None
|
| 35 |
+
|
| 36 |
+
### Changes
|
| 37 |
+
|
| 38 |
+
* Removed support for gfx940 and gfx941 targets (#1944)
|
| 39 |
+
* Replaced the raw buffer load/store intrinsics with Clang20 built-ins (#1876)
|
| 40 |
+
* DL and DPP kernels are now enabled by default.
|
| 41 |
+
* Number of instances in instance factory for grouped convolution forward NGCHW/GKYXC/NGKHW has been reduced.
|
| 42 |
+
* Number of instances in instance factory for grouped convolution backward weight NGCHW/GKYXC/NGKHW has been reduced.
|
| 43 |
+
* Number of instances in instance factory for grouped convolution backward data NGCHW/GKYXC/NGKHW has been reduced.
|
| 44 |
+
|
| 45 |
+
### Known issues
|
| 46 |
+
|
| 47 |
+
None
|
| 48 |
+
|
| 49 |
+
## Composable Kernel 1.1.0 for ROCm 6.1.0
|
| 50 |
+
|
| 51 |
+
### Additions
|
| 52 |
+
|
| 53 |
+
* Added generic instances for GEMM XDL operations (#1161)
|
| 54 |
+
* Added gamma and beta parameters for the layernorm and groupnorm bwd operations (#1133)
|
| 55 |
+
* Introduced wrapper sublibrary (limited functionality). (#1071, #1098, #1108, #1126)
|
| 56 |
+
* Added an option to vary the number of warm-up cycles and iterations for ckProfiler (#1124)
|
| 57 |
+
|
| 58 |
+
### Optimizations
|
| 59 |
+
|
| 60 |
+
* New performance optimizations for GEMM operations on MI200 and MI300 architectures (#1135)
|
| 61 |
+
|
| 62 |
+
### Fixes
|
| 63 |
+
|
| 64 |
+
* Reduced the build time for most GPU architectures (#1084)
|
| 65 |
+
* Fixed some conversion issues for fp8 data type (#1099)
|
| 66 |
+
|
| 67 |
+
### Changes
|
| 68 |
+
|
| 69 |
+
None
|
| 70 |
+
|
| 71 |
+
### Known issues
|
| 72 |
+
|
| 73 |
+
None
|
| 74 |
+
|
| 75 |
+
## Composable Kernel 1.1.0 for ROCm 6.0.0
|
| 76 |
+
|
| 77 |
+
### Fixes
|
| 78 |
+
|
| 79 |
+
* Fixed a hazard associated with inline v_dot (#808)
|
| 80 |
+
* Fixed two bugs in grouped convolution backward data without K padding (#848 #876)
|
| 81 |
+
|
| 82 |
+
### Optimizations
|
| 83 |
+
|
| 84 |
+
None
|
| 85 |
+
|
| 86 |
+
### Additions
|
| 87 |
+
|
| 88 |
+
* Added an image to a column kernel (#867)
|
| 89 |
+
* Added a column to an image kernel (#930)
|
| 90 |
+
* Support for 3D grouped convolution on RDNA 3 GPUs (#935, #950, #985)
|
| 91 |
+
* Grouped convolution support for small K and C (#822 #879 #897)
|
| 92 |
+
* Support for NHWGC (2D and 3D) grouped convolution backward weight (#769 #804)
|
| 93 |
+
* Support for bf16/f32/f16 and NHWGC (2D and 3D) grouped convolution backward data (#757 #799)
|
| 94 |
+
* Support for Batched GEMM DL (#732)
|
| 95 |
+
|
| 96 |
+
### Changes
|
| 97 |
+
|
| 98 |
+
* Changed the grouped convolution API to maintain consistency with other convolution kernels (#817)
|
| 99 |
+
|
| 100 |
+
## Composable Kernel 0.2.0 for ROCm 5.7.0
|
| 101 |
+
|
| 102 |
+
### Fixes
|
| 103 |
+
|
| 104 |
+
* Fixed a bug in 6-dimensional kernels (#555)
|
| 105 |
+
* Fixed a test case failure with grouped convolution backward weight (#524)
|
| 106 |
+
|
| 107 |
+
### Optimizations
|
| 108 |
+
|
| 109 |
+
* Improved the performance of the normalization kernel
|
| 110 |
+
|
| 111 |
+
### Additions
|
| 112 |
+
|
| 113 |
+
* New CMake flags:
|
| 114 |
+
* "DL_KERNELS"-* Must be set to "ON" in order to build the GEMM DL and batched_gemm_multi_d_dl instances
|
| 115 |
+
* "DTYPES" -- Can be set to any subset of "fp64;fp32;fp16;fp8;bf16;int8" to build an instance of the specified data types
|
| 116 |
+
* "INSTANCES_ONLY" -- Only builds CK library and instances without tests, examples, or profiler
|
| 117 |
+
* New feature: if GPU_TARGETS is not set in the CMake command line, CK will be built for all targets supported by the compiler
|
| 118 |
+
* Support for MI300A/MI300X
|
| 119 |
+
* Support for AMD RDNA 3
|
| 120 |
+
* New user tutorial (#563)
|
| 121 |
+
* Additional instances for irregular GEMM sizes (#560)
|
| 122 |
+
* New inter-wave consumer-producer programming model for GEMM kernels (#310)
|
| 123 |
+
* GEMM with support multiple elementwise fusions (multi-D) (#534)
|
| 124 |
+
* Multi-embeddings support (#542)
|
| 125 |
+
* AMD RDNA 3 blockwise GEMM and real GEMM support (#541)
|
| 126 |
+
* AMD RDNA grouped convolution backward weight support (#505)
|
| 127 |
+
* MaxPool and AvgPool forward (#815); MaxPool backward (#750)
|
| 128 |
+
|
| 129 |
+
### Changes
|
| 130 |
+
|
| 131 |
+
None
|
Code/Baselines/flash-attention/csrc/composable_kernel/CITATION.cff
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cff-version: 1.2.0
|
| 2 |
+
title: Composable Kernel
|
| 3 |
+
message: If you use this software, please cite using the following metadata.
|
| 4 |
+
type: software
|
| 5 |
+
authors:
|
| 6 |
+
- given-names: Chao
|
| 7 |
+
family-names: Liu
|
| 8 |
+
email: chao.liu2@amd.com
|
| 9 |
+
affiliation: AMD
|
| 10 |
+
- given-names: Jing
|
| 11 |
+
family-names: Zhang
|
| 12 |
+
email: jing.zhang3@amd.com
|
| 13 |
+
affiliation: AMD
|
| 14 |
+
- given-names: Letao
|
| 15 |
+
family-names: Qin
|
| 16 |
+
email: letao.qin@amd.com
|
| 17 |
+
affiliation: AMD
|
| 18 |
+
- given-names: Qianfeng
|
| 19 |
+
family-names: Zhang
|
| 20 |
+
email: qianfeng.zhang@amd.com
|
| 21 |
+
affiliation: AMD
|
| 22 |
+
- given-names: Liang
|
| 23 |
+
family-names: Huang
|
| 24 |
+
email: carlus.huang@amd.com
|
| 25 |
+
affiliation: AMD
|
| 26 |
+
- given-names: Shaojie
|
| 27 |
+
family-names: Wang
|
| 28 |
+
email: shaojie.wang@amd.com
|
| 29 |
+
affiliation: AMD
|
| 30 |
+
- given-names: Anthony
|
| 31 |
+
family-names: Chang
|
| 32 |
+
email: antc@amd.com
|
| 33 |
+
affiliation: AMD
|
| 34 |
+
- given-names: Chunyu
|
| 35 |
+
family-names: Lai
|
| 36 |
+
email: chunyu.lai@amd.com
|
| 37 |
+
affiliation: AMD
|
| 38 |
+
- given-names: Illia
|
| 39 |
+
family-names: Silin
|
| 40 |
+
email: illia.silin@amd.com
|
| 41 |
+
affiliation: AMD
|
| 42 |
+
- given-names: Adam
|
| 43 |
+
family-names: Osewski
|
| 44 |
+
email: adam.osewski@amd.com
|
| 45 |
+
affiliation: AMD
|
| 46 |
+
- given-names: Poyen
|
| 47 |
+
family-names: Chen
|
| 48 |
+
email: poyen.chen@amd.com
|
| 49 |
+
affiliation: AMD
|
| 50 |
+
- given-names: Rosty
|
| 51 |
+
family-names: Geyyer
|
| 52 |
+
email: rosty.geyyer@amd.com
|
| 53 |
+
affiliation: AMD
|
| 54 |
+
- given-names: Hanwen
|
| 55 |
+
family-names: Chen
|
| 56 |
+
- given-names: Tejash
|
| 57 |
+
family-names: Shah
|
| 58 |
+
- given-names: Xiaoyan
|
| 59 |
+
family-names: Zhou
|
| 60 |
+
- given-names: Jianfeng
|
| 61 |
+
family-names: Yan
|
| 62 |
+
repository-code: 'https://github.com/ROCm/composable_kernel'
|
| 63 |
+
abstract: Composable Kernel (CK) library aims to provide a programming model for writing performance critical kernels for Machine Learning workloads across multiple architectures including GPUs, CPUs, etc, through general purpose kernel programming languages, like HIP C++.
|
| 64 |
+
keywords:
|
| 65 |
+
- 'CK, Composable Kernel, Tensor Coordinate Transformation'
|
| 66 |
+
license: MIT
|
| 67 |
+
license-url: https://github.com/ROCm/composable_kernel/blob/7fc3ed761aa35709d87c8fbbe41dd368648b3541/LICENSE
|
Code/Baselines/flash-attention/csrc/composable_kernel/CMakeLists.txt
ADDED
|
@@ -0,0 +1,703 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cmake_minimum_required(VERSION 3.14)
|
| 2 |
+
if(POLICY CMP0140)
|
| 3 |
+
# policies CMP0140 not known to CMake until 3.25
|
| 4 |
+
cmake_policy(SET CMP0140 NEW)
|
| 5 |
+
endif()
|
| 6 |
+
|
| 7 |
+
get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
| 8 |
+
|
| 9 |
+
# This has to be initialized before the project() command appears
|
| 10 |
+
# Set the default of CMAKE_BUILD_TYPE to be release, unless user specifies with -D. MSVC_IDE does not use CMAKE_BUILD_TYPE
|
| 11 |
+
if(_GENERATOR_IS_MULTI_CONFIG)
|
| 12 |
+
set(CMAKE_CONFIGURATION_TYPES "Debug;Release;RelWithDebInfo;MinSizeRel" CACHE STRING
|
| 13 |
+
"Available build types (configurations) on multi-config generators")
|
| 14 |
+
else()
|
| 15 |
+
set(CMAKE_BUILD_TYPE Release CACHE STRING
|
| 16 |
+
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel.")
|
| 17 |
+
endif()
|
| 18 |
+
|
| 19 |
+
# Default installation path
|
| 20 |
+
if(NOT WIN32)
|
| 21 |
+
set(CMAKE_INSTALL_PREFIX "/opt/rocm" CACHE PATH "")
|
| 22 |
+
endif()
|
| 23 |
+
|
| 24 |
+
set(version 1.1.0)
|
| 25 |
+
# Check support for CUDA/HIP in Cmake
|
| 26 |
+
project(composable_kernel VERSION ${version} LANGUAGES CXX HIP)
|
| 27 |
+
include(CTest)
|
| 28 |
+
|
| 29 |
+
option(ENABLE_CLANG_CPP_CHECKS "Enables clang tidy, cppcheck" ON)
|
| 30 |
+
option(MIOPEN_REQ_LIBS_ONLY "Build only the MIOpen required libraries" OFF)
|
| 31 |
+
option(BUILD_MHA_LIB "Build the static library for flash attention" OFF)
|
| 32 |
+
|
| 33 |
+
# Usage: for customized Python location cmake -DCK_USE_ALTERNATIVE_PYTHON="/opt/Python-3.8.13/bin/python3.8"
|
| 34 |
+
# CK Codegen requires dataclass which is added in Python 3.7
|
| 35 |
+
# Python version 3.8 is required for general good practice as it is default for Ubuntu 20.04
|
| 36 |
+
if(NOT CK_USE_ALTERNATIVE_PYTHON)
|
| 37 |
+
find_package(Python3 3.8 COMPONENTS Interpreter REQUIRED)
|
| 38 |
+
else()
|
| 39 |
+
message(STATUS "Using alternative python version")
|
| 40 |
+
set(EXTRA_PYTHON_PATH)
|
| 41 |
+
# this is overly restrictive, we may need to be more flexible on the following
|
| 42 |
+
string(REPLACE "/bin/python3.8" "" EXTRA_PYTHON_PATH "${CK_USE_ALTERNATIVE_PYTHON}")
|
| 43 |
+
message(STATUS "alternative python path is: ${EXTRA_PYTHON_PATH}")
|
| 44 |
+
find_package(Python3 3.6 COMPONENTS Interpreter REQUIRED)
|
| 45 |
+
add_definitions(-DPython3_EXECUTABLE="${CK_USE_ALTERNATIVE_PYTHON}")
|
| 46 |
+
set(Python3_EXECUTABLE "${CK_USE_ALTERNATIVE_PYTHON}")
|
| 47 |
+
set(PYTHON_EXECUTABLE "${CK_USE_ALTERNATIVE_PYTHON}")
|
| 48 |
+
set(ENV{LD_LIBRARY_PATH} "${EXTRA_PYTHON_PATH}/lib:$ENV{LD_LIBRARY_PATH}")
|
| 49 |
+
endif()
|
| 50 |
+
|
| 51 |
+
list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
|
| 52 |
+
|
| 53 |
+
if (DTYPES)
|
| 54 |
+
add_definitions(-DDTYPES)
|
| 55 |
+
if (DTYPES MATCHES "int8")
|
| 56 |
+
add_definitions(-DCK_ENABLE_INT8)
|
| 57 |
+
set(CK_ENABLE_INT8 "ON")
|
| 58 |
+
endif()
|
| 59 |
+
if (DTYPES MATCHES "fp8")
|
| 60 |
+
add_definitions(-DCK_ENABLE_FP8)
|
| 61 |
+
set(CK_ENABLE_FP8 "ON")
|
| 62 |
+
endif()
|
| 63 |
+
if (DTYPES MATCHES "bf8")
|
| 64 |
+
add_definitions(-DCK_ENABLE_BF8)
|
| 65 |
+
set(CK_ENABLE_BF8 "ON")
|
| 66 |
+
endif()
|
| 67 |
+
if (DTYPES MATCHES "fp16")
|
| 68 |
+
add_definitions(-DCK_ENABLE_FP16)
|
| 69 |
+
set(CK_ENABLE_FP16 "ON")
|
| 70 |
+
endif()
|
| 71 |
+
if (DTYPES MATCHES "fp32")
|
| 72 |
+
add_definitions(-DCK_ENABLE_FP32)
|
| 73 |
+
set(CK_ENABLE_FP32 "ON")
|
| 74 |
+
endif()
|
| 75 |
+
if (DTYPES MATCHES "fp64")
|
| 76 |
+
add_definitions(-DCK_ENABLE_FP64)
|
| 77 |
+
set(CK_ENABLE_FP64 "ON")
|
| 78 |
+
endif()
|
| 79 |
+
if (DTYPES MATCHES "bf16")
|
| 80 |
+
add_definitions(-DCK_ENABLE_BF16)
|
| 81 |
+
set(CK_ENABLE_BF16 "ON")
|
| 82 |
+
endif()
|
| 83 |
+
message(STATUS "DTYPES macro set to ${DTYPES}")
|
| 84 |
+
else()
|
| 85 |
+
add_definitions(-DCK_ENABLE_INT8 -DCK_ENABLE_FP16 -DCK_ENABLE_FP32 -DCK_ENABLE_FP64 -DCK_ENABLE_BF16 -DCK_ENABLE_FP8 -DCK_ENABLE_BF8)
|
| 86 |
+
set(CK_ENABLE_INT8 "ON")
|
| 87 |
+
set(CK_ENABLE_FP16 "ON")
|
| 88 |
+
set(CK_ENABLE_FP32 "ON")
|
| 89 |
+
set(CK_ENABLE_FP64 "ON")
|
| 90 |
+
set(CK_ENABLE_BF16 "ON")
|
| 91 |
+
set(CK_ENABLE_FP8 "ON")
|
| 92 |
+
set(CK_ENABLE_BF8 "ON")
|
| 93 |
+
endif()
|
| 94 |
+
|
| 95 |
+
#for f8/bf8_t type
|
| 96 |
+
add_compile_options(-Wno-bit-int-extension)
|
| 97 |
+
add_compile_options(-Wno-pass-failed)
|
| 98 |
+
add_compile_options(-Wno-switch-default)
|
| 99 |
+
add_compile_options(-Wno-unique-object-duplication)
|
| 100 |
+
|
| 101 |
+
# Recent change in compiler makes this warning ON by default, which led to compile errors.
|
| 102 |
+
add_compile_options(-Wno-nrvo)
|
| 103 |
+
|
| 104 |
+
if(NOT DISABLE_DL_KERNELS)
|
| 105 |
+
add_definitions(-DDL_KERNELS)
|
| 106 |
+
set(DL_KERNELS "ON")
|
| 107 |
+
set(CK_ENABLE_DL_KERNELS "ON")
|
| 108 |
+
endif()
|
| 109 |
+
if(NOT DISABLE_DPP_KERNELS)
|
| 110 |
+
add_definitions(-DDPP_KERNELS)
|
| 111 |
+
set(DPP_KERNELS "ON")
|
| 112 |
+
set(CK_ENABLE_DPP_KERNELS "ON")
|
| 113 |
+
endif()
|
| 114 |
+
option(CK_USE_CODEGEN "Enable codegen library" OFF)
|
| 115 |
+
if(CK_USE_CODEGEN)
|
| 116 |
+
add_definitions(-DCK_USE_CODEGEN)
|
| 117 |
+
endif()
|
| 118 |
+
|
| 119 |
+
option(CK_TIME_KERNEL "Enable kernel time tracking" ON)
|
| 120 |
+
if(CK_TIME_KERNEL)
|
| 121 |
+
add_definitions(-DCK_TIME_KERNEL=1)
|
| 122 |
+
else()
|
| 123 |
+
add_definitions(-DCK_TIME_KERNEL=0)
|
| 124 |
+
endif()
|
| 125 |
+
|
| 126 |
+
include(getopt)
|
| 127 |
+
|
| 128 |
+
# CK version file to record release version as well as git commit hash
|
| 129 |
+
find_package(Git REQUIRED)
|
| 130 |
+
execute_process(COMMAND "${GIT_EXECUTABLE}" rev-parse HEAD OUTPUT_VARIABLE COMMIT_ID OUTPUT_STRIP_TRAILING_WHITESPACE)
|
| 131 |
+
configure_file(include/ck/version.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/ck/version.h)
|
| 132 |
+
|
| 133 |
+
set(ROCM_SYMLINK_LIBS OFF)
|
| 134 |
+
find_package(ROCM REQUIRED PATHS /opt/rocm)
|
| 135 |
+
|
| 136 |
+
include(ROCMInstallTargets)
|
| 137 |
+
include(ROCMPackageConfigHelpers)
|
| 138 |
+
include(ROCMSetupVersion)
|
| 139 |
+
include(ROCMInstallSymlinks)
|
| 140 |
+
include(ROCMCreatePackage)
|
| 141 |
+
include(CheckCXXCompilerFlag)
|
| 142 |
+
include(ROCMCheckTargetIds)
|
| 143 |
+
include(TargetFlags)
|
| 144 |
+
|
| 145 |
+
rocm_setup_version(VERSION ${version})
|
| 146 |
+
|
| 147 |
+
list(APPEND CMAKE_PREFIX_PATH ${CMAKE_INSTALL_PREFIX} ${CMAKE_INSTALL_PREFIX}/llvm ${CMAKE_INSTALL_PREFIX}/hip /opt/rocm /opt/rocm/llvm /opt/rocm/hip "$ENV{ROCM_PATH}" "$ENV{HIP_PATH}")
|
| 148 |
+
|
| 149 |
+
message(STATUS "GPU_TARGETS= ${GPU_TARGETS}")
|
| 150 |
+
message(STATUS "GPU_ARCHS= ${GPU_ARCHS}")
|
| 151 |
+
if(GPU_ARCHS)
|
| 152 |
+
#disable GPU_TARGETS to avoid conflicts, this needs to happen before we call hip package
|
| 153 |
+
unset(GPU_TARGETS CACHE)
|
| 154 |
+
unset(AMDGPU_TARGETS CACHE)
|
| 155 |
+
endif()
|
| 156 |
+
if(GPU_TARGETS)
|
| 157 |
+
set(USER_GPU_TARGETS 1)
|
| 158 |
+
else()
|
| 159 |
+
set(USER_GPU_TARGETS 0)
|
| 160 |
+
endif()
|
| 161 |
+
find_package(hip REQUIRED)
|
| 162 |
+
# No assumption that HIP kernels are launched with uniform block size for backward compatibility
|
| 163 |
+
# SWDEV-413293 and https://reviews.llvm.org/D155213
|
| 164 |
+
math(EXPR hip_VERSION_FLAT "(${hip_VERSION_MAJOR} * 1000 + ${hip_VERSION_MINOR}) * 100000 + ${hip_VERSION_PATCH}")
|
| 165 |
+
message(STATUS "hip_version_flat=${hip_VERSION_FLAT}")
|
| 166 |
+
|
| 167 |
+
message(STATUS "checking which targets are supported")
|
| 168 |
+
#In order to build just the CK library (without tests and examples) for all supported GPU targets
|
| 169 |
+
#use -D GPU_ARCHS="gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201"
|
| 170 |
+
#the GPU_TARGETS flag will be reset in this case in order to avoid conflicts.
|
| 171 |
+
#
|
| 172 |
+
#In order to build CK along with all tests and examples it should be OK to set GPU_TARGETS to just 1 or 2 similar architectures.
|
| 173 |
+
if(NOT ENABLE_ASAN_PACKAGING)
|
| 174 |
+
if(NOT WIN32 AND ${hip_VERSION_FLAT} LESS 600300000)
|
| 175 |
+
# WORKAROUND: compiler does not yet fully support gfx12 targets, need to fix version above
|
| 176 |
+
set(CK_GPU_TARGETS "gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102")
|
| 177 |
+
elseif(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER_EQUAL 600300000 AND ${hip_VERSION_FLAT} LESS 600400000)
|
| 178 |
+
set(CK_GPU_TARGETS "gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201")
|
| 179 |
+
elseif(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER_EQUAL 600400000 AND ${hip_VERSION_FLAT} LESS 600443483)
|
| 180 |
+
set(CK_GPU_TARGETS "gfx908;gfx90a;gfx942;gfx1030;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx950")
|
| 181 |
+
elseif(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER_EQUAL 600443483)
|
| 182 |
+
set(CK_GPU_TARGETS "gfx908;gfx90a;gfx942;gfx950;gfx10-3-generic;gfx11-generic;gfx12-generic")
|
| 183 |
+
endif()
|
| 184 |
+
else()
|
| 185 |
+
#build CK only for xnack-supported targets when using ASAN
|
| 186 |
+
set(CK_GPU_TARGETS "gfx908:xnack+;gfx90a:xnack+;gfx942:xnack+")
|
| 187 |
+
endif()
|
| 188 |
+
|
| 189 |
+
#if user set GPU_ARCHS on the cmake command line, overwrite default target list with user's list
|
| 190 |
+
#otherwise, if user set GPU_TARGETS, use that set of targets
|
| 191 |
+
if(GPU_ARCHS)
|
| 192 |
+
set(CK_GPU_TARGETS ${GPU_ARCHS})
|
| 193 |
+
else()
|
| 194 |
+
if(USER_GPU_TARGETS)
|
| 195 |
+
set(CK_GPU_TARGETS ${GPU_TARGETS})
|
| 196 |
+
endif()
|
| 197 |
+
endif()
|
| 198 |
+
#if the user did not set GPU_TARGETS, delete whatever was set by HIP package
|
| 199 |
+
if(NOT USER_GPU_TARGETS)
|
| 200 |
+
set(GPU_TARGETS "")
|
| 201 |
+
endif()
|
| 202 |
+
#make sure all the targets on the list are actually supported by the current compiler
|
| 203 |
+
rocm_check_target_ids(SUPPORTED_GPU_TARGETS
|
| 204 |
+
TARGETS ${CK_GPU_TARGETS})
|
| 205 |
+
|
| 206 |
+
message(STATUS "Building CK for the following targets: ${SUPPORTED_GPU_TARGETS}")
|
| 207 |
+
|
| 208 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx9")
|
| 209 |
+
message(STATUS "Enabling XDL instances")
|
| 210 |
+
add_definitions(-DCK_USE_XDL)
|
| 211 |
+
set(CK_USE_XDL "ON")
|
| 212 |
+
endif()
|
| 213 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx94" OR SUPPORTED_GPU_TARGETS MATCHES "gfx95")
|
| 214 |
+
message(STATUS "Enabling XDL FP8 gemms on native architectures")
|
| 215 |
+
add_definitions(-DCK_USE_GFX94)
|
| 216 |
+
set(CK_USE_GFX94 "ON")
|
| 217 |
+
endif()
|
| 218 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx11" OR SUPPORTED_GPU_TARGETS MATCHES "gfx12")
|
| 219 |
+
message(STATUS "Enabling WMMA instances")
|
| 220 |
+
add_definitions(-DCK_USE_WMMA)
|
| 221 |
+
set(CK_USE_WMMA "ON")
|
| 222 |
+
endif()
|
| 223 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx12")
|
| 224 |
+
message(STATUS "Enabling WMMA FP8 gemms on native architectures")
|
| 225 |
+
add_definitions(-DCK_USE_WMMA_FP8)
|
| 226 |
+
set(CK_USE_WMMA_FP8 "ON")
|
| 227 |
+
endif()
|
| 228 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx12" OR SUPPORTED_GPU_TARGETS MATCHES "gfx950")
|
| 229 |
+
add_definitions(-DCK_USE_OCP_FP8)
|
| 230 |
+
set(CK_USE_OCP_FP8 "ON")
|
| 231 |
+
endif()
|
| 232 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx90a" OR SUPPORTED_GPU_TARGETS MATCHES "gfx94")
|
| 233 |
+
add_definitions(-DCK_USE_FNUZ_FP8)
|
| 234 |
+
set(CK_USE_FNUZ_FP8 "ON")
|
| 235 |
+
endif()
|
| 236 |
+
if (SUPPORTED_GPU_TARGETS MATCHES "gfx950")
|
| 237 |
+
add_definitions(-DCK_USE_NATIVE_MX_SUPPORT)
|
| 238 |
+
set(CK_USE_NATIVE_MX_SUPPORT "ON")
|
| 239 |
+
endif()
|
| 240 |
+
|
| 241 |
+
option(CK_USE_FP8_ON_UNSUPPORTED_ARCH "Enable FP8 GEMM instances on older architectures" OFF)
|
| 242 |
+
if(CK_USE_FP8_ON_UNSUPPORTED_ARCH AND (SUPPORTED_GPU_TARGETS MATCHES "gfx90a" OR SUPPORTED_GPU_TARGETS MATCHES "gfx908"))
|
| 243 |
+
add_definitions(-DCK_USE_FP8_ON_UNSUPPORTED_ARCH)
|
| 244 |
+
set(CK_USE_FP8_ON_UNSUPPORTED_ARCH "ON")
|
| 245 |
+
endif()
|
| 246 |
+
|
| 247 |
+
# CK config file to record supported datatypes, etc.
|
| 248 |
+
configure_file(include/ck/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/include/ck/config.h)
|
| 249 |
+
|
| 250 |
+
if(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER 500723302)
|
| 251 |
+
check_cxx_compiler_flag("-fno-offload-uniform-block" HAS_NO_OFFLOAD_UNIFORM_BLOCK)
|
| 252 |
+
if(HAS_NO_OFFLOAD_UNIFORM_BLOCK)
|
| 253 |
+
message(STATUS "Adding the fno-offload-uniform-block compiler flag")
|
| 254 |
+
add_compile_options(-fno-offload-uniform-block)
|
| 255 |
+
endif()
|
| 256 |
+
endif()
|
| 257 |
+
if(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER 500500000)
|
| 258 |
+
check_cxx_compiler_flag("-mllvm --lsr-drop-solution=1" HAS_LSR_DROP_SOLUTION)
|
| 259 |
+
if(HAS_LSR_DROP_SOLUTION)
|
| 260 |
+
message(STATUS "Adding the lsr-drop-solution=1 compiler flag")
|
| 261 |
+
add_compile_options("SHELL: -mllvm --lsr-drop-solution=1")
|
| 262 |
+
endif()
|
| 263 |
+
endif()
|
| 264 |
+
if(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER 600140090)
|
| 265 |
+
check_cxx_compiler_flag("-mllvm -enable-post-misched=0" HAS_ENABLE_POST_MISCHED)
|
| 266 |
+
if(HAS_ENABLE_POST_MISCHED)
|
| 267 |
+
message(STATUS "Adding the enable-post-misched=0 compiler flag")
|
| 268 |
+
add_compile_options("SHELL: -mllvm -enable-post-misched=0")
|
| 269 |
+
endif()
|
| 270 |
+
endif()
|
| 271 |
+
set(check-coerce)
|
| 272 |
+
check_cxx_compiler_flag(" -mllvm -amdgpu-coerce-illegal-types=1" check-coerce)
|
| 273 |
+
if(NOT WIN32 AND check-coerce AND ${hip_VERSION_FLAT} GREATER 600241132)
|
| 274 |
+
message(STATUS "Adding the amdgpu-coerce-illegal-types=1")
|
| 275 |
+
add_compile_options("SHELL: -mllvm -amdgpu-coerce-illegal-types=1")
|
| 276 |
+
endif()
|
| 277 |
+
if(NOT WIN32 AND ${hip_VERSION_FLAT} GREATER 600241132)
|
| 278 |
+
message(STATUS "Adding -amdgpu-early-inline-all=true and -amdgpu-function-calls=false")
|
| 279 |
+
add_compile_options("SHELL: -mllvm -amdgpu-early-inline-all=true")
|
| 280 |
+
add_compile_options("SHELL: -mllvm -amdgpu-function-calls=false")
|
| 281 |
+
endif()
|
| 282 |
+
#
|
| 283 |
+
# Seperate linking jobs from compiling
|
| 284 |
+
# Too many concurrent linking jobs can break the build
|
| 285 |
+
# Copied from LLVM
|
| 286 |
+
set(CK_PARALLEL_LINK_JOBS "" CACHE STRING
|
| 287 |
+
"Define the maximum number of concurrent link jobs (Ninja only).")
|
| 288 |
+
if(CMAKE_GENERATOR MATCHES "Ninja")
|
| 289 |
+
if(CK_PARALLEL_LINK_JOBS)
|
| 290 |
+
set_property(GLOBAL APPEND PROPERTY JOB_POOLS link_job_pool=${CK_PARALLEL_LINK_JOBS})
|
| 291 |
+
set(CMAKE_JOB_POOL_LINK link_job_pool)
|
| 292 |
+
endif()
|
| 293 |
+
elseif(CK_PARALLEL_LINK_JOBS)
|
| 294 |
+
message(WARNING "Job pooling is only available with Ninja generators.")
|
| 295 |
+
endif()
|
| 296 |
+
# Similar for compiling
|
| 297 |
+
set(CK_PARALLEL_COMPILE_JOBS "" CACHE STRING
|
| 298 |
+
"Define the maximum number of concurrent compile jobs (Ninja only).")
|
| 299 |
+
if(CMAKE_GENERATOR MATCHES "Ninja")
|
| 300 |
+
if(CK_PARALLEL_COMPILE_JOBS)
|
| 301 |
+
set_property(GLOBAL APPEND PROPERTY JOB_POOLS compile_job_pool=${CK_PARALLEL_COMPILE_JOBS})
|
| 302 |
+
set(CMAKE_JOB_POOL_COMPILE compile_job_pool)
|
| 303 |
+
endif()
|
| 304 |
+
elseif(CK_PARALLEL_COMPILE_JOBS)
|
| 305 |
+
message(WARNING "Job pooling is only available with Ninja generators.")
|
| 306 |
+
endif()
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
option(USE_BITINT_EXTENSION_INT4 "Whether to enable clang's BitInt extension to provide int4 data type." OFF)
|
| 310 |
+
option(USE_OPT_GFX11 "Whether to enable LDS cumode and Wavefront32 mode for GFX11 silicons." OFF)
|
| 311 |
+
option(ENABLE_ASM_DUMP "Whether to enable assembly dump for kernels." OFF)
|
| 312 |
+
|
| 313 |
+
if(USE_BITINT_EXTENSION_INT4)
|
| 314 |
+
add_compile_definitions(CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4)
|
| 315 |
+
add_compile_options(-Wno-bit-int-extension)
|
| 316 |
+
message(STATUS "CK compiled with USE_BITINT_EXTENSION_INT4 set to ${USE_BITINT_EXTENSION_INT4}")
|
| 317 |
+
endif()
|
| 318 |
+
|
| 319 |
+
if(USE_OPT_GFX11)
|
| 320 |
+
add_compile_options(-mcumode)
|
| 321 |
+
add_compile_options(-mno-wavefrontsize64)
|
| 322 |
+
message(STATUS "CK compiled with USE_OPT_GFX11 set to ${USE_OPT_GFX11}")
|
| 323 |
+
endif()
|
| 324 |
+
|
| 325 |
+
if(ENABLE_ASM_DUMP)
|
| 326 |
+
add_compile_options(--save-temps)
|
| 327 |
+
add_compile_options(-Wno-gnu-line-marker)
|
| 328 |
+
message("CK compiled with ENABLE_ASM_DUMP set to ${ENABLE_ASM_DUMP}")
|
| 329 |
+
endif()
|
| 330 |
+
|
| 331 |
+
## Threads
|
| 332 |
+
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
| 333 |
+
find_package(Threads REQUIRED)
|
| 334 |
+
link_libraries(Threads::Threads)
|
| 335 |
+
|
| 336 |
+
## C++
|
| 337 |
+
set(CMAKE_CXX_STANDARD 17)
|
| 338 |
+
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
| 339 |
+
set(CMAKE_CXX_EXTENSIONS OFF)
|
| 340 |
+
message(STATUS "CMAKE_CXX_COMPILER: ${CMAKE_CXX_COMPILER}")
|
| 341 |
+
|
| 342 |
+
# https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_macros.html
|
| 343 |
+
# _GLIBCXX_ASSERTIONS
|
| 344 |
+
# Undefined by default. When defined, enables extra error checking in the form of
|
| 345 |
+
# precondition assertions, such as bounds checking in strings and null pointer
|
| 346 |
+
# checks when dereferencing smart pointers
|
| 347 |
+
option(USE_GLIBCXX_ASSERTIONS "Turn on additional c++ library checks." OFF)
|
| 348 |
+
if(USE_GLIBCXX_ASSERTIONS)
|
| 349 |
+
add_compile_options(-Wp,-D_GLIBCXX_ASSERTIONS)
|
| 350 |
+
endif()
|
| 351 |
+
|
| 352 |
+
## HIP
|
| 353 |
+
set(CMAKE_HIP_PLATFORM amd)
|
| 354 |
+
set(CMAKE_HIP_COMPILER ${CMAKE_CXX_COMPILER})
|
| 355 |
+
set(CMAKE_HIP_EXTENSIONS ON)
|
| 356 |
+
message(STATUS "CMAKE_HIP_COMPILER: ${CMAKE_HIP_COMPILER}")
|
| 357 |
+
|
| 358 |
+
## OpenMP
|
| 359 |
+
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
|
| 360 |
+
# workaround issue hipcc in rocm3.5 cannot find openmp
|
| 361 |
+
set(OpenMP_CXX "${CMAKE_CXX_COMPILER}")
|
| 362 |
+
set(OpenMP_CXX_FLAGS "-fopenmp=libomp -Wno-unused-command-line-argument")
|
| 363 |
+
set(OpenMP_CXX_LIB_NAMES "libomp" "libgomp" "libiomp5")
|
| 364 |
+
set(OpenMP_libomp_LIBRARY ${OpenMP_CXX_LIB_NAMES})
|
| 365 |
+
set(OpenMP_libgomp_LIBRARY ${OpenMP_CXX_LIB_NAMES})
|
| 366 |
+
set(OpenMP_libiomp5_LIBRARY ${OpenMP_CXX_LIB_NAMES})
|
| 367 |
+
else()
|
| 368 |
+
find_package(OpenMP REQUIRED)
|
| 369 |
+
endif()
|
| 370 |
+
|
| 371 |
+
message(STATUS "OpenMP_CXX_LIB_NAMES: ${OpenMP_CXX_LIB_NAMES}")
|
| 372 |
+
message(STATUS "OpenMP_gomp_LIBRARY: ${OpenMP_gomp_LIBRARY}")
|
| 373 |
+
message(STATUS "OpenMP_pthread_LIBRARY: ${OpenMP_pthread_LIBRARY}")
|
| 374 |
+
message(STATUS "OpenMP_CXX_FLAGS: ${OpenMP_CXX_FLAGS}")
|
| 375 |
+
|
| 376 |
+
link_libraries(${OpenMP_gomp_LIBRARY})
|
| 377 |
+
link_libraries(${OpenMP_pthread_LIBRARY})
|
| 378 |
+
|
| 379 |
+
## HIP
|
| 380 |
+
# Override HIP version in config.h, if necessary.
|
| 381 |
+
# The variables set by find_package() can't be overwritten,
|
| 382 |
+
# therefore let's use intermediate variables.
|
| 383 |
+
set(CK_HIP_VERSION_MAJOR "${HIP_VERSION_MAJOR}")
|
| 384 |
+
set(CK_HIP_VERSION_MINOR "${HIP_VERSION_MINOR}")
|
| 385 |
+
set(CK_HIP_VERSION_PATCH "${HIP_VERSION_PATCH}")
|
| 386 |
+
if( DEFINED CK_OVERRIDE_HIP_VERSION_MAJOR )
|
| 387 |
+
set(CK_HIP_VERSION_MAJOR "${CK_OVERRIDE_HIP_VERSION_MAJOR}")
|
| 388 |
+
message(STATUS "CK_HIP_VERSION_MAJOR overriden with ${CK_OVERRIDE_HIP_VERSION_MAJOR}")
|
| 389 |
+
endif()
|
| 390 |
+
if( DEFINED CK_OVERRIDE_HIP_VERSION_MINOR )
|
| 391 |
+
set(CK_HIP_VERSION_MINOR "${CK_OVERRIDE_HIP_VERSION_MINOR}")
|
| 392 |
+
message(STATUS "CK_HIP_VERSION_MINOR overriden with ${CK_OVERRIDE_HIP_VERSION_MINOR}")
|
| 393 |
+
endif()
|
| 394 |
+
if( DEFINED CK_OVERRIDE_HIP_VERSION_PATCH )
|
| 395 |
+
set(CK_HIP_VERSION_PATCH "${CK_OVERRIDE_HIP_VERSION_PATCH}")
|
| 396 |
+
message(STATUS "CK_HIP_VERSION_PATCH overriden with ${CK_OVERRIDE_HIP_VERSION_PATCH}")
|
| 397 |
+
endif()
|
| 398 |
+
message(STATUS "Build with HIP ${HIP_VERSION}")
|
| 399 |
+
link_libraries(hip::device)
|
| 400 |
+
if(CK_hip_VERSION VERSION_GREATER_EQUAL 6.0.23494)
|
| 401 |
+
add_compile_definitions(__HIP_PLATFORM_AMD__=1)
|
| 402 |
+
else()
|
| 403 |
+
add_compile_definitions(__HIP_PLATFORM_HCC__=1)
|
| 404 |
+
endif()
|
| 405 |
+
|
| 406 |
+
include(EnableCompilerWarnings)
|
| 407 |
+
## tidy
|
| 408 |
+
set(CK_TIDY_ERRORS ERRORS * -readability-inconsistent-declaration-parameter-name)
|
| 409 |
+
if(CMAKE_CXX_COMPILER MATCHES ".*hcc" OR CMAKE_CXX_COMPILER MATCHES ".*clang\\+\\+")
|
| 410 |
+
set(CK_TIDY_CHECKS -modernize-use-override -readability-non-const-parameter)
|
| 411 |
+
# Enable tidy on hip
|
| 412 |
+
elseif(CK_BACKEND STREQUAL "HIP" OR CK_BACKEND STREQUAL "HIPNOGPU")
|
| 413 |
+
set(CK_TIDY_ERRORS ALL)
|
| 414 |
+
endif()
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
if(ENABLE_CLANG_CPP_CHECKS)
|
| 418 |
+
include(ClangTidy)
|
| 419 |
+
enable_clang_tidy(
|
| 420 |
+
CHECKS
|
| 421 |
+
*
|
| 422 |
+
-abseil-*
|
| 423 |
+
-android-cloexec-fopen
|
| 424 |
+
# Yea we shouldn't be using rand()
|
| 425 |
+
-cert-msc30-c
|
| 426 |
+
-bugprone-exception-escape
|
| 427 |
+
-bugprone-macro-parentheses
|
| 428 |
+
-cert-env33-c
|
| 429 |
+
-cert-msc32-c
|
| 430 |
+
-cert-msc50-cpp
|
| 431 |
+
-cert-msc51-cpp
|
| 432 |
+
-cert-dcl37-c
|
| 433 |
+
-cert-dcl51-cpp
|
| 434 |
+
-clang-analyzer-alpha.core.CastToStruct
|
| 435 |
+
-clang-analyzer-optin.performance.Padding
|
| 436 |
+
-clang-diagnostic-deprecated-declarations
|
| 437 |
+
-clang-diagnostic-extern-c-compat
|
| 438 |
+
-clang-diagnostic-unused-command-line-argument
|
| 439 |
+
-cppcoreguidelines-avoid-c-arrays
|
| 440 |
+
-cppcoreguidelines-avoid-magic-numbers
|
| 441 |
+
-cppcoreguidelines-explicit-virtual-functions
|
| 442 |
+
-cppcoreguidelines-init-variables
|
| 443 |
+
-cppcoreguidelines-macro-usage
|
| 444 |
+
-cppcoreguidelines-non-private-member-variables-in-classes
|
| 445 |
+
-cppcoreguidelines-pro-bounds-array-to-pointer-decay
|
| 446 |
+
-cppcoreguidelines-pro-bounds-constant-array-index
|
| 447 |
+
-cppcoreguidelines-pro-bounds-pointer-arithmetic
|
| 448 |
+
-cppcoreguidelines-pro-type-member-init
|
| 449 |
+
-cppcoreguidelines-pro-type-reinterpret-cast
|
| 450 |
+
-cppcoreguidelines-pro-type-union-access
|
| 451 |
+
-cppcoreguidelines-pro-type-vararg
|
| 452 |
+
-cppcoreguidelines-special-member-functions
|
| 453 |
+
-fuchsia-*
|
| 454 |
+
-google-explicit-constructor
|
| 455 |
+
-google-readability-braces-around-statements
|
| 456 |
+
-google-readability-todo
|
| 457 |
+
-google-runtime-int
|
| 458 |
+
-google-runtime-references
|
| 459 |
+
-hicpp-vararg
|
| 460 |
+
-hicpp-braces-around-statements
|
| 461 |
+
-hicpp-explicit-conversions
|
| 462 |
+
-hicpp-named-parameter
|
| 463 |
+
-hicpp-no-array-decay
|
| 464 |
+
# We really shouldn't use bitwise operators with signed integers, but
|
| 465 |
+
# opencl leaves us no choice
|
| 466 |
+
-hicpp-avoid-c-arrays
|
| 467 |
+
-hicpp-signed-bitwise
|
| 468 |
+
-hicpp-special-member-functions
|
| 469 |
+
-hicpp-uppercase-literal-suffix
|
| 470 |
+
-hicpp-use-auto
|
| 471 |
+
-hicpp-use-equals-default
|
| 472 |
+
-hicpp-use-override
|
| 473 |
+
-llvm-header-guard
|
| 474 |
+
-llvm-include-order
|
| 475 |
+
#-llvmlibc-*
|
| 476 |
+
-llvmlibc-restrict-system-libc-headers
|
| 477 |
+
-llvmlibc-callee-namespace
|
| 478 |
+
-llvmlibc-implementation-in-namespace
|
| 479 |
+
-llvm-else-after-return
|
| 480 |
+
-llvm-qualified-auto
|
| 481 |
+
-misc-misplaced-const
|
| 482 |
+
-misc-non-private-member-variables-in-classes
|
| 483 |
+
-misc-no-recursion
|
| 484 |
+
-modernize-avoid-bind
|
| 485 |
+
-modernize-avoid-c-arrays
|
| 486 |
+
-modernize-pass-by-value
|
| 487 |
+
-modernize-use-auto
|
| 488 |
+
-modernize-use-default-member-init
|
| 489 |
+
-modernize-use-equals-default
|
| 490 |
+
-modernize-use-trailing-return-type
|
| 491 |
+
-modernize-use-transparent-functors
|
| 492 |
+
-performance-unnecessary-value-param
|
| 493 |
+
-readability-braces-around-statements
|
| 494 |
+
-readability-else-after-return
|
| 495 |
+
# we are not ready to use it, but very useful
|
| 496 |
+
-readability-function-cognitive-complexity
|
| 497 |
+
-readability-isolate-declaration
|
| 498 |
+
-readability-magic-numbers
|
| 499 |
+
-readability-named-parameter
|
| 500 |
+
-readability-uppercase-literal-suffix
|
| 501 |
+
-readability-convert-member-functions-to-static
|
| 502 |
+
-readability-qualified-auto
|
| 503 |
+
-readability-redundant-string-init
|
| 504 |
+
# too many narrowing conversions in our code
|
| 505 |
+
-bugprone-narrowing-conversions
|
| 506 |
+
-cppcoreguidelines-narrowing-conversions
|
| 507 |
+
-altera-struct-pack-align
|
| 508 |
+
-cppcoreguidelines-prefer-member-initializer
|
| 509 |
+
${CK_TIDY_CHECKS}
|
| 510 |
+
${CK_TIDY_ERRORS}
|
| 511 |
+
HEADER_FILTER
|
| 512 |
+
"\.hpp$"
|
| 513 |
+
EXTRA_ARGS
|
| 514 |
+
-DCK_USE_CLANG_TIDY
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
include(CppCheck)
|
| 518 |
+
enable_cppcheck(
|
| 519 |
+
CHECKS
|
| 520 |
+
warning
|
| 521 |
+
style
|
| 522 |
+
performance
|
| 523 |
+
portability
|
| 524 |
+
SUPPRESS
|
| 525 |
+
ConfigurationNotChecked
|
| 526 |
+
constStatement
|
| 527 |
+
duplicateCondition
|
| 528 |
+
noExplicitConstructor
|
| 529 |
+
passedByValue
|
| 530 |
+
preprocessorErrorDirective
|
| 531 |
+
shadowVariable
|
| 532 |
+
unusedFunction
|
| 533 |
+
unusedPrivateFunction
|
| 534 |
+
unusedStructMember
|
| 535 |
+
unmatchedSuppression
|
| 536 |
+
FORCE
|
| 537 |
+
SOURCES
|
| 538 |
+
library/src
|
| 539 |
+
INCLUDE
|
| 540 |
+
${CMAKE_CURRENT_SOURCE_DIR}/include
|
| 541 |
+
${CMAKE_CURRENT_BINARY_DIR}/include
|
| 542 |
+
${CMAKE_CURRENT_SOURCE_DIR}/library/include
|
| 543 |
+
DEFINE
|
| 544 |
+
CPPCHECK=1
|
| 545 |
+
__linux__=1
|
| 546 |
+
)
|
| 547 |
+
else()
|
| 548 |
+
function(clang_tidy_check TARGET)
|
| 549 |
+
# stub out empty function if clang tidy is not enabled
|
| 550 |
+
endfunction()
|
| 551 |
+
endif()
|
| 552 |
+
|
| 553 |
+
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
|
| 554 |
+
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
|
| 555 |
+
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)
|
| 556 |
+
|
| 557 |
+
# set CK project include directories
|
| 558 |
+
include_directories(BEFORE
|
| 559 |
+
${PROJECT_BINARY_DIR}/include
|
| 560 |
+
${PROJECT_SOURCE_DIR}/include
|
| 561 |
+
${PROJECT_SOURCE_DIR}/library/include
|
| 562 |
+
${HIP_INCLUDE_DIRS}
|
| 563 |
+
)
|
| 564 |
+
|
| 565 |
+
SET(BUILD_DEV ON CACHE BOOL "BUILD_DEV")
|
| 566 |
+
if(BUILD_DEV)
|
| 567 |
+
add_compile_options(-Werror)
|
| 568 |
+
add_compile_options(-Weverything)
|
| 569 |
+
endif()
|
| 570 |
+
message(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
|
| 571 |
+
|
| 572 |
+
if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
|
| 573 |
+
add_compile_options(-fcolor-diagnostics)
|
| 574 |
+
endif()
|
| 575 |
+
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9)
|
| 576 |
+
add_compile_options(-fdiagnostics-color=always)
|
| 577 |
+
endif()
|
| 578 |
+
|
| 579 |
+
if(NOT MIOPEN_REQ_LIBS_ONLY)
|
| 580 |
+
# make check runs the entire set of examples and tests
|
| 581 |
+
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -C ${CMAKE_CFG_INTDIR})
|
| 582 |
+
# make smoke runs the tests and examples that runs within 30 seconds on gfx90a
|
| 583 |
+
add_custom_target(smoke COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -C ${CMAKE_CFG_INTDIR} -L "SMOKE_TEST")
|
| 584 |
+
# make regression runs the tests and examples that runs for more 30 seconds on gfx90a
|
| 585 |
+
add_custom_target(regression COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure -C ${CMAKE_CFG_INTDIR} -L "REGRESSION_TEST")
|
| 586 |
+
endif()
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
file(GLOB_RECURSE INSTANCE_FILES "${PROJECT_SOURCE_DIR}/*/device_*_instance.cpp")
|
| 591 |
+
file(GLOB dir_list RELATIVE ${PROJECT_SOURCE_DIR}/library/src/tensor_operation_instance/gpu ${PROJECT_SOURCE_DIR}/library/src/tensor_operation_instance/gpu/*)
|
| 592 |
+
set(CK_DEVICE_INSTANCES)
|
| 593 |
+
FOREACH(subdir_path ${dir_list})
|
| 594 |
+
set(target_dir)
|
| 595 |
+
IF(IS_DIRECTORY "${PROJECT_SOURCE_DIR}/library/src/tensor_operation_instance/gpu/${subdir_path}")
|
| 596 |
+
set(cmake_instance)
|
| 597 |
+
file(READ "${PROJECT_SOURCE_DIR}/library/src/tensor_operation_instance/gpu/${subdir_path}/CMakeLists.txt" cmake_instance)
|
| 598 |
+
set(add_inst 0)
|
| 599 |
+
if(("${cmake_instance}" MATCHES "fp8" OR "${cmake_instance}" MATCHES "_f8") AND DTYPES MATCHES "fp8")
|
| 600 |
+
set(add_inst 1)
|
| 601 |
+
endif()
|
| 602 |
+
if(("${cmake_instance}" MATCHES "bf8" OR "${cmake_instance}" MATCHES "_b8") AND DTYPES MATCHES "bf8")
|
| 603 |
+
set(add_inst 1)
|
| 604 |
+
endif()
|
| 605 |
+
if(("${cmake_instance}" MATCHES "fp16" OR "${cmake_instance}" MATCHES "_f16") AND DTYPES MATCHES "fp16")
|
| 606 |
+
set(add_inst 1)
|
| 607 |
+
endif()
|
| 608 |
+
if(("${cmake_instance}" MATCHES "fp32" OR "${cmake_instance}" MATCHES "_f32") AND DTYPES MATCHES "fp32")
|
| 609 |
+
set(add_inst 1)
|
| 610 |
+
endif()
|
| 611 |
+
if(("${cmake_instance}" MATCHES "fp64" OR "${cmake_instance}" MATCHES "_f64") AND DTYPES MATCHES "fp64")
|
| 612 |
+
set(add_inst 1)
|
| 613 |
+
endif()
|
| 614 |
+
if(("${cmake_instance}" MATCHES "bf16" OR "${cmake_instance}" MATCHES "_b16") AND DTYPES MATCHES "bf16")
|
| 615 |
+
set(add_inst 1)
|
| 616 |
+
endif()
|
| 617 |
+
if(("${cmake_instance}" MATCHES "int8" OR "${cmake_instance}" MATCHES "_i8") AND DTYPES MATCHES "int8")
|
| 618 |
+
set(add_inst 1)
|
| 619 |
+
endif()
|
| 620 |
+
if(NOT "${cmake_instance}" MATCHES "DTYPES")
|
| 621 |
+
set(add_inst 1)
|
| 622 |
+
endif()
|
| 623 |
+
if(add_inst EQUAL 1 OR NOT DEFINED DTYPES)
|
| 624 |
+
list(APPEND CK_DEVICE_INSTANCES device_${subdir_path}_instance)
|
| 625 |
+
endif()
|
| 626 |
+
ENDIF()
|
| 627 |
+
ENDFOREACH()
|
| 628 |
+
|
| 629 |
+
add_custom_target(instances DEPENDS utility;${CK_DEVICE_INSTANCES} SOURCES ${INSTANCE_FILES})
|
| 630 |
+
|
| 631 |
+
option(MIOPEN_REQ_LIBS_ONLY "Build only the MIOpen required libraries" OFF)
|
| 632 |
+
option(DISABLE_OFFLOAD_COMPRESS "Disable offload compress compiler flag when building instances" OFF)
|
| 633 |
+
option(BUILD_MHA_LIB "Build the static library for flash attention" OFF)
|
| 634 |
+
|
| 635 |
+
add_subdirectory(library)
|
| 636 |
+
|
| 637 |
+
if(NOT GPU_ARCHS AND USER_GPU_TARGETS)
|
| 638 |
+
rocm_package_setup_component(tests
|
| 639 |
+
LIBRARY_NAME composablekernel
|
| 640 |
+
PACKAGE_NAME tests # Prevent -static suffix on package name
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
rocm_package_setup_component(examples
|
| 644 |
+
LIBRARY_NAME composablekernel
|
| 645 |
+
PACKAGE_NAME examples
|
| 646 |
+
)
|
| 647 |
+
add_subdirectory(example)
|
| 648 |
+
add_subdirectory(tile_engine)
|
| 649 |
+
if(BUILD_TESTING)
|
| 650 |
+
add_subdirectory(test)
|
| 651 |
+
endif()
|
| 652 |
+
endif()
|
| 653 |
+
|
| 654 |
+
if (NOT MIOPEN_REQ_LIBS_ONLY)
|
| 655 |
+
rocm_package_setup_component(profiler
|
| 656 |
+
LIBRARY_NAME composablekernel
|
| 657 |
+
PACKAGE_NAME ckprofiler
|
| 658 |
+
)
|
| 659 |
+
add_subdirectory(profiler)
|
| 660 |
+
endif()
|
| 661 |
+
|
| 662 |
+
if(CK_USE_CODEGEN AND (SUPPORTED_GPU_TARGETS MATCHES "gfx9" OR GPU_ARCHS))
|
| 663 |
+
add_subdirectory(codegen)
|
| 664 |
+
endif()
|
| 665 |
+
|
| 666 |
+
#Create an interface target for the include only files and call it "composablekernels"
|
| 667 |
+
include(CMakePackageConfigHelpers)
|
| 668 |
+
|
| 669 |
+
write_basic_package_version_file(
|
| 670 |
+
"${CMAKE_CURRENT_BINARY_DIR}/composable_kernelConfigVersion.cmake"
|
| 671 |
+
VERSION "${version}"
|
| 672 |
+
COMPATIBILITY AnyNewerVersion
|
| 673 |
+
)
|
| 674 |
+
|
| 675 |
+
configure_package_config_file(${CMAKE_CURRENT_SOURCE_DIR}/Config.cmake.in
|
| 676 |
+
"${CMAKE_CURRENT_BINARY_DIR}/composable_kernelConfig.cmake"
|
| 677 |
+
INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/composable_kernel
|
| 678 |
+
NO_CHECK_REQUIRED_COMPONENTS_MACRO
|
| 679 |
+
)
|
| 680 |
+
|
| 681 |
+
rocm_install(FILES
|
| 682 |
+
"${CMAKE_CURRENT_BINARY_DIR}/composable_kernelConfig.cmake"
|
| 683 |
+
"${CMAKE_CURRENT_BINARY_DIR}/composable_kernelConfigVersion.cmake"
|
| 684 |
+
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/composable_kernel
|
| 685 |
+
)
|
| 686 |
+
|
| 687 |
+
# Install CK version and configuration files
|
| 688 |
+
rocm_install(FILES
|
| 689 |
+
${PROJECT_BINARY_DIR}/include/ck/version.h
|
| 690 |
+
${PROJECT_BINARY_DIR}/include/ck/config.h
|
| 691 |
+
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/ck/
|
| 692 |
+
)
|
| 693 |
+
|
| 694 |
+
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
|
| 695 |
+
set(CPACK_RPM_PACKAGE_LICENSE "MIT")
|
| 696 |
+
|
| 697 |
+
rocm_create_package(
|
| 698 |
+
NAME composablekernel
|
| 699 |
+
DESCRIPTION "High Performance Composable Kernel for AMD GPUs"
|
| 700 |
+
MAINTAINER "MIOpen Kernels Dev Team <dl.MIOpen@amd.com>"
|
| 701 |
+
LDCONFIG
|
| 702 |
+
HEADER_ONLY
|
| 703 |
+
)
|
Code/Baselines/flash-attention/csrc/composable_kernel/CONTRIBUTORS.md
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[Back to the main page](./README.md)
|
| 2 |
+
# Composable Kernel Developers and Contributors
|
| 3 |
+
|
| 4 |
+
This is the list of developers and contributors to Composable Kernel library
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
## Developers
|
| 8 |
+
[Chao Liu](https://github.com/asroy), [Jing Zhang](https://github.com/zjing14), 2018-2023
|
| 9 |
+
|
| 10 |
+
[Letao Qin](https://github.com/ltqin), [Qianfeng Zhang](https://github.com/qianfengz), [Liang Huang](https://github.com/carlushuang), [Shaojie Wang](https://github.com/shaojiewang), 2019-2023
|
| 11 |
+
|
| 12 |
+
[Anthony Chang](https://github.com/rosenrodt), [Chunyu Lai](https://github.com/rocking5566), [Illia Silin](https://github.com/illsilin), [Adam Osewski](https://github.com/aosewski), [Poyen Chen](https://github.com/poyenc), [Rosty Geyyer](https://github.com/geyyer), [Astha Rai](https://github.com/arai713), [Shi YanXing](https://github.com/Yanxing-Shi), 2022-2023
|
| 13 |
+
|
| 14 |
+
[Hari Sadasivan](https://github.com/hsadasiv), [Bartlomiej Kocot](https://github.com/bartekxk), [Bartlomiej Wroblewski](https://github.com/bwroblew), 2023
|
| 15 |
+
|
| 16 |
+
Hanwen Chang, 2019-2021,
|
| 17 |
+
|
| 18 |
+
Tejash Shah, 2019-2020
|
| 19 |
+
|
| 20 |
+
Xiaoyan Zhou, 2020
|
| 21 |
+
|
| 22 |
+
[Jianfeng Yan](https://github.com/j4yan), 2021-2022
|
| 23 |
+
[Jun Liu](https://github.com/junliume), 2021-2024
|
| 24 |
+
|
| 25 |
+
## Product Manager
|
| 26 |
+
[John Afaganis](https://github.com/afagaj)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
## Contributors
|
| 31 |
+
[Dan Yao](https://github.com/danyao12), [Guangzhao Lu](https://github.com/guangzlu), [Raman Jana](https://github.com/ramjana), [Jehandad Khan](https://github.com/JehandadKhan), [Wen-Heng (Jack) Chung](https://github.com/whchung)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
## Acknowledgement
|
| 35 |
+
CK team works closely with Meta [AITemplate](https://github.com/facebookincubator/AITemplate) team ([Bing Xu](https://github.com/antinucleon), [Hao Lu](https://github.com/hlu1), [Ying Zhang](https://github.com/ipiszy), etc). Most of the lucrative graph optimization opportunities in ML models were identified by AITemplate team, and we also co-designed many high performance fused kernels for AMD GPUs. Without this collaboration, CK would not reach its current potential.
|
Code/Baselines/flash-attention/csrc/composable_kernel/Config.cmake.in
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@PACKAGE_INIT@
|
| 2 |
+
|
| 3 |
+
set(_composable_kernel_supported_components device_other_operations device_gemm_operations device_conv_operations device_mha_operations device_contraction_operations device_reduction_operations utility)
|
| 4 |
+
|
| 5 |
+
foreach(_comp ${composable_kernel_FIND_COMPONENTS})
|
| 6 |
+
if(NOT _comp IN_LIST _composable_kernel_supported_components)
|
| 7 |
+
set(composable_kernel_FOUND False)
|
| 8 |
+
set(composable_kernel_NOT_FOUND_MESSAGE "Unsupported component: ${_comp}")
|
| 9 |
+
endif()
|
| 10 |
+
include("${CMAKE_CURRENT_LIST_DIR}/composable_kernel${_comp}Targets.cmake")
|
| 11 |
+
endforeach()
|
Code/Baselines/flash-attention/csrc/composable_kernel/Dockerfile
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM ubuntu:24.04
|
| 2 |
+
ARG DEBIAN_FRONTEND=noninteractive
|
| 3 |
+
ARG ROCMVERSION=6.4.1
|
| 4 |
+
ARG compiler_version=""
|
| 5 |
+
ARG compiler_commit=""
|
| 6 |
+
ARG CK_SCCACHE=""
|
| 7 |
+
ARG DEB_ROCM_REPO=http://repo.radeon.com/rocm/apt/.apt_$ROCMVERSION/
|
| 8 |
+
ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
|
| 9 |
+
|
| 10 |
+
# Add rocm repository
|
| 11 |
+
RUN set -xe && \
|
| 12 |
+
apt-get update && apt-get install -y --allow-unauthenticated apt-utils wget gnupg2 curl && \
|
| 13 |
+
curl -fsSL https://repo.radeon.com/rocm/rocm.gpg.key | gpg --dearmor -o /etc/apt/trusted.gpg.d/rocm-keyring.gpg
|
| 14 |
+
|
| 15 |
+
RUN if [ "$ROCMVERSION" != "6.5" ]; then \
|
| 16 |
+
sh -c "wget https://repo.radeon.com/amdgpu-install/$ROCMVERSION/ubuntu/jammy/amdgpu-install_6.4.60401-1_all.deb --no-check-certificate" && \
|
| 17 |
+
apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated ./amdgpu-install_6.4.60401-1_all.deb && \
|
| 18 |
+
wget -qO - http://repo.radeon.com/rocm/rocm.gpg.key | apt-key add - && \
|
| 19 |
+
sh -c "echo deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/rocm-keyring.gpg] $DEB_ROCM_REPO jammy main > /etc/apt/sources.list.d/rocm.list" && \
|
| 20 |
+
sh -c 'echo deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/rocm-keyring.gpg] https://repo.radeon.com/amdgpu/$ROCMVERSION/ubuntu jammy main > /etc/apt/sources.list.d/amdgpu.list'; \
|
| 21 |
+
fi
|
| 22 |
+
|
| 23 |
+
RUN sh -c "echo deb http://mirrors.kernel.org/ubuntu jammy main universe | tee -a /etc/apt/sources.list" && \
|
| 24 |
+
amdgpu-install -y --usecase=rocm --no-dkms
|
| 25 |
+
|
| 26 |
+
## Sccache binary built from source for ROCm, only install if CK_SCCACHE is defined
|
| 27 |
+
ARG SCCACHE_REPO_URL=http://compute-artifactory.amd.com/artifactory/rocm-generic-experimental/rocm-sccache
|
| 28 |
+
ENV SCCACHE_INSTALL_LOCATION=/usr/local/.cargo/bin
|
| 29 |
+
ENV PATH=$PATH:${SCCACHE_INSTALL_LOCATION}
|
| 30 |
+
ENV CK_SCCACHE=$CK_SCCACHE
|
| 31 |
+
RUN if [ "$CK_SCCACHE" != "" ]; then \
|
| 32 |
+
mkdir -p ${SCCACHE_INSTALL_LOCATION} && \
|
| 33 |
+
curl ${SCCACHE_REPO_URL}/portable/0.2.16/sccache-0.2.16-alpha.1-rocm --output ${SCCACHE_INSTALL_LOCATION}/sccache && \
|
| 34 |
+
chmod +x ${SCCACHE_INSTALL_LOCATION}/sccache; \
|
| 35 |
+
fi
|
| 36 |
+
|
| 37 |
+
# Install dependencies
|
| 38 |
+
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated \
|
| 39 |
+
build-essential \
|
| 40 |
+
cmake \
|
| 41 |
+
git \
|
| 42 |
+
hip-rocclr \
|
| 43 |
+
iputils-ping \
|
| 44 |
+
jq \
|
| 45 |
+
libelf-dev \
|
| 46 |
+
libnuma-dev \
|
| 47 |
+
libpthread-stubs0-dev \
|
| 48 |
+
llvm-amdgpu \
|
| 49 |
+
mpich \
|
| 50 |
+
net-tools \
|
| 51 |
+
pkg-config \
|
| 52 |
+
python3-full \
|
| 53 |
+
redis \
|
| 54 |
+
rocm-llvm-dev \
|
| 55 |
+
sshpass \
|
| 56 |
+
stunnel \
|
| 57 |
+
software-properties-common \
|
| 58 |
+
vim \
|
| 59 |
+
nano \
|
| 60 |
+
zlib1g-dev \
|
| 61 |
+
zip \
|
| 62 |
+
libzstd-dev \
|
| 63 |
+
openssh-server \
|
| 64 |
+
clang-format-12 \
|
| 65 |
+
kmod && \
|
| 66 |
+
apt-get clean && \
|
| 67 |
+
rm -rf /var/lib/apt/lists/* && \
|
| 68 |
+
rm -rf amdgpu-install* && \
|
| 69 |
+
# Remove unnecessary rocm components that take a lot of space
|
| 70 |
+
apt-get remove -y rocblas rocfft rocsparse composablekernel-dev hipblaslt
|
| 71 |
+
|
| 72 |
+
#Install latest ccache
|
| 73 |
+
RUN git clone https://github.com/ccache/ccache.git && \
|
| 74 |
+
cd ccache && mkdir build && cd build && cmake .. && make install && \
|
| 75 |
+
#Install ninja build tracing tools
|
| 76 |
+
cd / && \
|
| 77 |
+
wget -qO /usr/local/bin/ninja.gz https://github.com/ninja-build/ninja/releases/latest/download/ninja-linux.zip && \
|
| 78 |
+
gunzip /usr/local/bin/ninja.gz && \
|
| 79 |
+
chmod a+x /usr/local/bin/ninja && \
|
| 80 |
+
git clone https://github.com/nico/ninjatracing.git && \
|
| 81 |
+
#Install ClangBuildAnalyzer
|
| 82 |
+
git clone https://github.com/aras-p/ClangBuildAnalyzer.git && \
|
| 83 |
+
cd ClangBuildAnalyzer/ && \
|
| 84 |
+
make -f projects/make/Makefile && \
|
| 85 |
+
cd / && \
|
| 86 |
+
#Install latest cppcheck
|
| 87 |
+
git clone https://github.com/danmar/cppcheck.git && \
|
| 88 |
+
cd cppcheck && mkdir build && cd build && cmake .. && cmake --build . && \
|
| 89 |
+
cd / && \
|
| 90 |
+
# Install an init system
|
| 91 |
+
wget https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
|
| 92 |
+
dpkg -i dumb-init_*.deb && rm dumb-init_*.deb && \
|
| 93 |
+
# Install packages for processing the performance results
|
| 94 |
+
pip3 install --break-system-packages --upgrade pytest pymysql pandas==2.2.3 sqlalchemy==2.0.3 setuptools-rust setuptools sshtunnel==0.4.0 && \
|
| 95 |
+
# Add render group
|
| 96 |
+
groupadd -f render && \
|
| 97 |
+
# Install the new rocm-cmake version
|
| 98 |
+
git clone -b master https://github.com/ROCm/rocm-cmake.git && \
|
| 99 |
+
cd rocm-cmake && mkdir build && cd build && \
|
| 100 |
+
cmake .. && cmake --build . && cmake --build . --target install
|
| 101 |
+
|
| 102 |
+
WORKDIR /
|
| 103 |
+
# Add alternative compilers, if necessary
|
| 104 |
+
ENV compiler_version=$compiler_version
|
| 105 |
+
ENV compiler_commit=$compiler_commit
|
| 106 |
+
RUN sh -c "echo compiler version = '$compiler_version'" && \
|
| 107 |
+
sh -c "echo compiler commit = '$compiler_commit'"
|
| 108 |
+
|
| 109 |
+
RUN if ( [ "$compiler_version" = "amd-staging" ] || [ "$compiler_version" = "amd-mainline" ] ) && [ "$compiler_commit" = "" ]; then \
|
| 110 |
+
git clone -b "$compiler_version" https://github.com/ROCm/llvm-project.git && \
|
| 111 |
+
cd llvm-project && mkdir build && cd build && \
|
| 112 |
+
cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=1 -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_ENABLE_RUNTIMES="compiler-rt" ../llvm && \
|
| 113 |
+
make -j 8 ; \
|
| 114 |
+
else echo "using the release compiler"; \
|
| 115 |
+
fi
|
| 116 |
+
|
| 117 |
+
RUN if ( [ "$compiler_version" = "amd-staging" ] || [ "$compiler_version" = "amd-mainline" ] ) && [ "$compiler_commit" != "" ]; then \
|
| 118 |
+
git clone -b "$compiler_version" https://github.com/ROCm/llvm-project.git && \
|
| 119 |
+
cd llvm-project && git checkout "$compiler_commit" && echo "checking out commit $compiler_commit" && mkdir build && cd build && \
|
| 120 |
+
cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=1 -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_ENABLE_RUNTIMES="compiler-rt" ../llvm && \
|
| 121 |
+
make -j 8 ; \
|
| 122 |
+
else echo "using the release compiler"; \
|
| 123 |
+
fi
|
Code/Baselines/flash-attention/csrc/composable_kernel/Dockerfile.compiler
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ARG BASE_DOCKER="rocm/composable_kernel:ck_ub24.04_rocm6.4.1"
|
| 2 |
+
FROM $BASE_DOCKER
|
| 3 |
+
ARG compiler_version=""
|
| 4 |
+
ARG compiler_commit=""
|
| 5 |
+
|
| 6 |
+
# Add alternative compilers, if necessary
|
| 7 |
+
ENV compiler_version=$compiler_version
|
| 8 |
+
ENV compiler_commit=$compiler_commit
|
| 9 |
+
RUN sh -c "echo compiler version = '$compiler_version'" && \
|
| 10 |
+
sh -c "echo compiler commit = '$compiler_commit'"
|
| 11 |
+
|
| 12 |
+
RUN if ( [ "$compiler_version" = "amd-staging" ] || [ "$compiler_version" = "amd-mainline" ] ) && [ "$compiler_commit" = "" ]; then \
|
| 13 |
+
git clone -b "$compiler_version" https://github.com/ROCm/llvm-project.git && \
|
| 14 |
+
cd llvm-project && mkdir build && cd build && \
|
| 15 |
+
cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=1 -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_ENABLE_RUNTIMES="compiler-rt" ../llvm && \
|
| 16 |
+
make -j 16 ; \
|
| 17 |
+
else echo "using the release compiler"; \
|
| 18 |
+
fi
|
| 19 |
+
|
| 20 |
+
RUN if ( [ "$compiler_version" = "amd-staging" ] || [ "$compiler_version" = "amd-mainline" ] ) && [ "$compiler_commit" != "" ]; then \
|
| 21 |
+
git clone -b "$compiler_version" https://github.com/ROCm/llvm-project.git && \
|
| 22 |
+
cd llvm-project && git checkout "$compiler_commit" && echo "checking out commit $compiler_commit" && mkdir build && cd build && \
|
| 23 |
+
cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/llvm -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=1 -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" -DLLVM_ENABLE_PROJECTS="clang;lld" -DLLVM_ENABLE_RUNTIMES="compiler-rt" ../llvm && \
|
| 24 |
+
make -j 16 ; \
|
| 25 |
+
else echo "using the release compiler"; \
|
| 26 |
+
fi
|
Code/Baselines/flash-attention/csrc/composable_kernel/Jenkinsfile
ADDED
|
@@ -0,0 +1,1457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def rocmnode(name) {
|
| 2 |
+
return '(rocmtest || miopen) && (' + name + ')'
|
| 3 |
+
}
|
| 4 |
+
|
| 5 |
+
def show_node_info() {
|
| 6 |
+
sh """
|
| 7 |
+
echo "NODE_NAME = \$NODE_NAME"
|
| 8 |
+
lsb_release -sd
|
| 9 |
+
uname -r
|
| 10 |
+
cat /sys/module/amdgpu/version
|
| 11 |
+
ls /opt/ -la
|
| 12 |
+
"""
|
| 13 |
+
}
|
| 14 |
+
|
| 15 |
+
class Version {
|
| 16 |
+
int major, minor, patch
|
| 17 |
+
@Override
|
| 18 |
+
String toString() {
|
| 19 |
+
return [major, minor, patch].findAll().join('.')
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
def parseVersion(String versionString) {
|
| 23 |
+
if (!versionString) return null
|
| 24 |
+
int[] tokens = versionString.split(/\./).collect { it as int } // Splits the string by '.' and converts each part to an integer.
|
| 25 |
+
return new Version(
|
| 26 |
+
major: tokens[0],
|
| 27 |
+
minor: tokens.length > 1 ? tokens[1] : null,
|
| 28 |
+
patch: tokens.length > 2 ? tokens[2] : null,
|
| 29 |
+
)
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
def nthreads() {
|
| 33 |
+
def nproc = sh(returnStdout: true, script: 'nproc')
|
| 34 |
+
echo "Number of cores: ${nproc}"
|
| 35 |
+
def n = nproc.toInteger()
|
| 36 |
+
if (n > 32){
|
| 37 |
+
n /= 2
|
| 38 |
+
}
|
| 39 |
+
if (n > 64){
|
| 40 |
+
n = 64
|
| 41 |
+
}
|
| 42 |
+
echo "Number of threads used for building: ${n}"
|
| 43 |
+
return n
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
def runShell(String command){
|
| 47 |
+
def responseCode = sh returnStatus: true, script: "${command} > tmp.txt"
|
| 48 |
+
def output = readFile(file: "tmp.txt")
|
| 49 |
+
return (output != "")
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
def getBaseDockerImageName(){
|
| 53 |
+
def img
|
| 54 |
+
if (params.USE_CUSTOM_DOCKER != ""){
|
| 55 |
+
img = "${params.USE_CUSTOM_DOCKER}"
|
| 56 |
+
}
|
| 57 |
+
else{
|
| 58 |
+
def ROCM_numeric = parseVersion("${params.ROCMVERSION}")
|
| 59 |
+
if ( ROCM_numeric.major <= 6 && ROCM_numeric.minor < 5 ){
|
| 60 |
+
img = "${env.CK_DOCKERHUB}:ck_ub24.04_rocm${params.ROCMVERSION}"
|
| 61 |
+
}
|
| 62 |
+
else{
|
| 63 |
+
img = "${env.CK_DOCKERHUB_PRIVATE}:ck_ub24.04_rocm${params.ROCMVERSION}"
|
| 64 |
+
}
|
| 65 |
+
}
|
| 66 |
+
return img
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
def getDockerImageName(){
|
| 70 |
+
def img
|
| 71 |
+
def base_name = getBaseDockerImageName()
|
| 72 |
+
if (params.USE_CUSTOM_DOCKER != ""){
|
| 73 |
+
img = "${params.USE_CUSTOM_DOCKER}"
|
| 74 |
+
}
|
| 75 |
+
else{
|
| 76 |
+
if (params.COMPILER_VERSION == "") {
|
| 77 |
+
img = "${base_name}"
|
| 78 |
+
}
|
| 79 |
+
else{
|
| 80 |
+
if (params.COMPILER_COMMIT == ""){
|
| 81 |
+
img = "${base_name}_${params.COMPILER_VERSION}"
|
| 82 |
+
}
|
| 83 |
+
else{
|
| 84 |
+
def commit = "${params.COMPILER_COMMIT}"[0..6]
|
| 85 |
+
img = "${base_name}_${params.COMPILER_VERSION}_${commit}"
|
| 86 |
+
}
|
| 87 |
+
}
|
| 88 |
+
}
|
| 89 |
+
return img
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
def check_host() {
|
| 93 |
+
if ("${env.CK_SCCACHE}" != "null"){
|
| 94 |
+
def SCCACHE_SERVER="${env.CK_SCCACHE.split(':')[0]}"
|
| 95 |
+
echo "sccache server: ${SCCACHE_SERVER}"
|
| 96 |
+
sh "chmod +w -R ${env.WORKSPACE}"
|
| 97 |
+
sh '''ping -c 1 -p 6379 "${SCCACHE_SERVER}" | echo $? > tmp.txt'''
|
| 98 |
+
def output = readFile(file: "tmp.txt")
|
| 99 |
+
echo "tmp.txt contents: \$output"
|
| 100 |
+
return (output != "0")
|
| 101 |
+
}
|
| 102 |
+
else{
|
| 103 |
+
return 1
|
| 104 |
+
}
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
def build_compiler(){
|
| 108 |
+
def compiler
|
| 109 |
+
compiler = "${params.BUILD_COMPILER}"
|
| 110 |
+
return compiler
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
def check_arch(){
|
| 114 |
+
def arch_type = 0
|
| 115 |
+
sh 'rocminfo | tee rocminfo.log'
|
| 116 |
+
if ( runShell('grep -n "gfx90a" rocminfo.log') ){
|
| 117 |
+
arch_type = 1
|
| 118 |
+
}
|
| 119 |
+
else if ( runShell('grep -n "gfx942" rocminfo.log') ) {
|
| 120 |
+
arch_type = 2
|
| 121 |
+
}
|
| 122 |
+
else if ( runShell('grep -n "gfx10" rocminfo.log') ) {
|
| 123 |
+
arch_type = 3
|
| 124 |
+
}
|
| 125 |
+
else if ( runShell('grep -n "gfx11" rocminfo.log') ) {
|
| 126 |
+
arch_type = 4
|
| 127 |
+
}
|
| 128 |
+
else if ( runShell('grep -n "gfx12" rocminfo.log') ) {
|
| 129 |
+
arch_type = 5
|
| 130 |
+
}
|
| 131 |
+
else if ( runShell('grep -n "gfx908" rocminfo.log') ) {
|
| 132 |
+
arch_type = 6
|
| 133 |
+
}
|
| 134 |
+
else if ( runShell('grep -n "gfx950" rocminfo.log') ) {
|
| 135 |
+
arch_type = 7
|
| 136 |
+
}
|
| 137 |
+
return arch_type
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
def getDockerImage(Map conf=[:]){
|
| 141 |
+
env.DOCKER_BUILDKIT=1
|
| 142 |
+
def prefixpath = conf.get("prefixpath", "/opt/rocm")
|
| 143 |
+
def no_cache = conf.get("no_cache", false)
|
| 144 |
+
def dockerArgs = "--build-arg BUILDKIT_INLINE_CACHE=1 --build-arg PREFIX=${prefixpath} --build-arg CK_SCCACHE='${env.CK_SCCACHE}' --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' --build-arg DISABLE_CACHE='git rev-parse ${params.COMPILER_VERSION}' "
|
| 145 |
+
if(no_cache)
|
| 146 |
+
{
|
| 147 |
+
dockerArgs = dockerArgs + " --no-cache "
|
| 148 |
+
}
|
| 149 |
+
echo "Docker Args: ${dockerArgs}"
|
| 150 |
+
def image
|
| 151 |
+
if ( params.BUILD_LEGACY_OS && conf.get("docker_name", "") != "" ){
|
| 152 |
+
image = conf.get("docker_name", "")
|
| 153 |
+
echo "Using legacy docker: ${image}"
|
| 154 |
+
}
|
| 155 |
+
else if ( params.BUILD_GFX950 && conf.get("docker_name", "") != "" ){
|
| 156 |
+
image = conf.get("docker_name", "")
|
| 157 |
+
echo "Using special docker: ${image}"
|
| 158 |
+
}
|
| 159 |
+
else{
|
| 160 |
+
image = getDockerImageName()
|
| 161 |
+
echo "Using default docker: ${image}"
|
| 162 |
+
}
|
| 163 |
+
//Check if image exists
|
| 164 |
+
def retimage
|
| 165 |
+
try
|
| 166 |
+
{
|
| 167 |
+
echo "Pulling image: ${image}"
|
| 168 |
+
retimage = docker.image("${image}")
|
| 169 |
+
withDockerRegistry([ credentialsId: "ck_docker_cred", url: "" ]) {
|
| 170 |
+
retimage.pull()
|
| 171 |
+
}
|
| 172 |
+
}
|
| 173 |
+
catch(Exception ex)
|
| 174 |
+
{
|
| 175 |
+
error "Unable to locate image: ${image}"
|
| 176 |
+
}
|
| 177 |
+
return [retimage, image]
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
def buildDocker(install_prefix){
|
| 181 |
+
show_node_info()
|
| 182 |
+
env.DOCKER_BUILDKIT=1
|
| 183 |
+
checkout scm
|
| 184 |
+
def image_name = getDockerImageName()
|
| 185 |
+
def base_image_name = getBaseDockerImageName()
|
| 186 |
+
echo "Building Docker for ${image_name}"
|
| 187 |
+
def dockerArgs = "--build-arg PREFIX=${install_prefix} --build-arg CK_SCCACHE='${env.CK_SCCACHE}' --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' "
|
| 188 |
+
if(params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline" || params.COMPILER_COMMIT != ""){
|
| 189 |
+
dockerArgs = dockerArgs + " --no-cache --build-arg BASE_DOCKER='${base_image_name}' -f Dockerfile.compiler . "
|
| 190 |
+
}
|
| 191 |
+
else{
|
| 192 |
+
dockerArgs = dockerArgs + " -f Dockerfile . "
|
| 193 |
+
}
|
| 194 |
+
echo "Build Args: ${dockerArgs}"
|
| 195 |
+
try{
|
| 196 |
+
if(params.BUILD_DOCKER){
|
| 197 |
+
//force building the new docker if that parameter is true
|
| 198 |
+
echo "Building image: ${image_name}"
|
| 199 |
+
retimage = docker.build("${image_name}", dockerArgs)
|
| 200 |
+
withDockerRegistry([ credentialsId: "ck_docker_cred", url: "" ]) {
|
| 201 |
+
retimage.push()
|
| 202 |
+
}
|
| 203 |
+
sh 'docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi'
|
| 204 |
+
}
|
| 205 |
+
else{
|
| 206 |
+
echo "Checking for image: ${image_name}"
|
| 207 |
+
sh "docker manifest inspect --insecure ${image_name}"
|
| 208 |
+
echo "Image: ${image_name} found! Skipping building image"
|
| 209 |
+
}
|
| 210 |
+
}
|
| 211 |
+
catch(Exception ex){
|
| 212 |
+
echo "Unable to locate image: ${image_name}. Building image now"
|
| 213 |
+
retimage = docker.build("${image_name}", dockerArgs + ' .')
|
| 214 |
+
withDockerRegistry([ credentialsId: "ck_docker_cred", url: "" ]) {
|
| 215 |
+
retimage.push()
|
| 216 |
+
}
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
def cmake_build(Map conf=[:]){
|
| 221 |
+
|
| 222 |
+
def compiler = build_compiler()
|
| 223 |
+
def config_targets = conf.get("config_targets","check")
|
| 224 |
+
def debug_flags = "-g -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=undefined " + conf.get("extradebugflags", "")
|
| 225 |
+
def build_envs = "CTEST_PARALLEL_LEVEL=4 " + conf.get("build_env","")
|
| 226 |
+
def prefixpath = conf.get("prefixpath","/opt/rocm")
|
| 227 |
+
def setup_args = conf.get("setup_args","")
|
| 228 |
+
|
| 229 |
+
if (prefixpath != "/usr/local"){
|
| 230 |
+
setup_args = setup_args + " -DCMAKE_PREFIX_PATH=${prefixpath} "
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
def build_type_debug = (conf.get("build_type",'release') == 'debug')
|
| 234 |
+
|
| 235 |
+
// use special compiler for gfx950
|
| 236 |
+
if ( check_arch() == 7){
|
| 237 |
+
compiler = "/llvm-project/build/bin/clang++"
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
//cmake_env can overwrite default CXX variables.
|
| 241 |
+
def cmake_envs = "CXX=${compiler} CXXFLAGS='-Werror' " + conf.get("cmake_ex_env","")
|
| 242 |
+
|
| 243 |
+
def package_build = (conf.get("package_build","") == "true")
|
| 244 |
+
|
| 245 |
+
if (package_build == true) {
|
| 246 |
+
config_targets = "package"
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
if(conf.get("build_install","") == "true")
|
| 250 |
+
{
|
| 251 |
+
config_targets = 'install ' + config_targets
|
| 252 |
+
setup_args = ' -DBUILD_DEV=On -DCMAKE_INSTALL_PREFIX=../install' + setup_args
|
| 253 |
+
} else{
|
| 254 |
+
setup_args = ' -DBUILD_DEV=On' + setup_args
|
| 255 |
+
}
|
| 256 |
+
if (params.DISABLE_DL_KERNELS){
|
| 257 |
+
setup_args = setup_args + " -DDISABLE_DL_KERNELS=ON "
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
if(build_type_debug){
|
| 261 |
+
setup_args = " -DCMAKE_BUILD_TYPE=debug -DCMAKE_CXX_FLAGS_DEBUG='${debug_flags}'" + setup_args
|
| 262 |
+
}else{
|
| 263 |
+
setup_args = " -DCMAKE_BUILD_TYPE=release" + setup_args
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
def pre_setup_cmd = """
|
| 267 |
+
#!/bin/bash
|
| 268 |
+
echo \$HSA_ENABLE_SDMA
|
| 269 |
+
ulimit -c unlimited
|
| 270 |
+
rm -rf build
|
| 271 |
+
mkdir build
|
| 272 |
+
rm -rf install
|
| 273 |
+
mkdir install
|
| 274 |
+
cd build
|
| 275 |
+
"""
|
| 276 |
+
def invocation_tag=""
|
| 277 |
+
if (setup_args.contains("gfx12")){
|
| 278 |
+
invocation_tag="gfx12"
|
| 279 |
+
}
|
| 280 |
+
if (setup_args.contains("gfx11")){
|
| 281 |
+
invocation_tag="gfx11"
|
| 282 |
+
}
|
| 283 |
+
if (setup_args.contains("gfx10")){
|
| 284 |
+
invocation_tag="gfx10"
|
| 285 |
+
}
|
| 286 |
+
if (setup_args.contains("gfx908")){
|
| 287 |
+
invocation_tag="gfx908"
|
| 288 |
+
}
|
| 289 |
+
if (setup_args.contains("gfx90a")){
|
| 290 |
+
invocation_tag="gfx90a"
|
| 291 |
+
}
|
| 292 |
+
if (setup_args.contains("gfx94")){
|
| 293 |
+
invocation_tag="gfx94"
|
| 294 |
+
}
|
| 295 |
+
if (setup_args.contains("gfx95")){
|
| 296 |
+
invocation_tag="gfx95"
|
| 297 |
+
}
|
| 298 |
+
echo "invocation tag: ${invocation_tag}"
|
| 299 |
+
def redis_pre_setup_cmd = pre_setup_cmd
|
| 300 |
+
if(check_host() && params.USE_SCCACHE && "${env.CK_SCCACHE}" != "null" && "${invocation_tag}" != "") {
|
| 301 |
+
redis_pre_setup_cmd = pre_setup_cmd + """
|
| 302 |
+
#!/bin/bash
|
| 303 |
+
export ROCM_PATH=/opt/rocm
|
| 304 |
+
export SCCACHE_ENABLED=true
|
| 305 |
+
export SCCACHE_LOG_LEVEL=debug
|
| 306 |
+
export SCCACHE_IDLE_TIMEOUT=14400
|
| 307 |
+
export COMPILERS_HASH_DIR=/tmp/.sccache
|
| 308 |
+
export SCCACHE_BIN=/usr/local/.cargo/bin/sccache
|
| 309 |
+
export SCCACHE_EXTRAFILES=/tmp/.sccache/rocm_compilers_hash_file
|
| 310 |
+
export SCCACHE_REDIS="redis://${env.CK_SCCACHE}"
|
| 311 |
+
echo "connect = ${env.CK_SCCACHE}" >> ../script/redis-cli.conf
|
| 312 |
+
export SCCACHE_C_CUSTOM_CACHE_BUSTER="${invocation_tag}"
|
| 313 |
+
echo \$SCCACHE_C_CUSTOM_CACHE_BUSTER
|
| 314 |
+
stunnel ../script/redis-cli.conf
|
| 315 |
+
../script/sccache_wrapper.sh --enforce_redis
|
| 316 |
+
"""
|
| 317 |
+
try {
|
| 318 |
+
def cmd1 = conf.get("cmd1", """
|
| 319 |
+
${redis_pre_setup_cmd}
|
| 320 |
+
""")
|
| 321 |
+
sh cmd1
|
| 322 |
+
setup_args = " -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_C_COMPILER_LAUNCHER=sccache " + setup_args
|
| 323 |
+
}
|
| 324 |
+
catch(Exception err){
|
| 325 |
+
echo "could not connect to redis server: ${err.getMessage()}. will not use sccache."
|
| 326 |
+
def cmd2 = conf.get("cmd2", """
|
| 327 |
+
${pre_setup_cmd}
|
| 328 |
+
""")
|
| 329 |
+
sh cmd2
|
| 330 |
+
}
|
| 331 |
+
}
|
| 332 |
+
else{
|
| 333 |
+
def cmd3 = conf.get("cmd3", """
|
| 334 |
+
${pre_setup_cmd}
|
| 335 |
+
""")
|
| 336 |
+
sh cmd3
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
// reduce parallelism when compiling, clang uses too much memory
|
| 340 |
+
def nt = nthreads()
|
| 341 |
+
def cmd
|
| 342 |
+
def setup_cmd
|
| 343 |
+
def build_cmd
|
| 344 |
+
def execute_cmd = conf.get("execute_cmd", "")
|
| 345 |
+
if(!setup_args.contains("NO_CK_BUILD")){
|
| 346 |
+
if (setup_args.contains("gfx9") && params.NINJA_BUILD_TRACE){
|
| 347 |
+
echo "running ninja build trace"
|
| 348 |
+
setup_cmd = conf.get("setup_cmd", """${cmake_envs} cmake -G Ninja ${setup_args} -DCMAKE_CXX_FLAGS=" -O3 -ftime-trace " .. """)
|
| 349 |
+
build_cmd = conf.get("build_cmd", "${build_envs} ninja -j${nt} ${config_targets}")
|
| 350 |
+
}
|
| 351 |
+
else{
|
| 352 |
+
setup_cmd = conf.get("setup_cmd", "${cmake_envs} cmake ${setup_args} .. ")
|
| 353 |
+
build_cmd = conf.get("build_cmd", "${build_envs} make -j${nt} ${config_targets}")
|
| 354 |
+
}
|
| 355 |
+
cmd = conf.get("cmd", """
|
| 356 |
+
${setup_cmd}
|
| 357 |
+
${build_cmd}
|
| 358 |
+
${execute_cmd}
|
| 359 |
+
""")
|
| 360 |
+
}
|
| 361 |
+
else{
|
| 362 |
+
cmd = conf.get("cmd", """
|
| 363 |
+
${execute_cmd}
|
| 364 |
+
""")
|
| 365 |
+
}
|
| 366 |
+
|
| 367 |
+
echo cmd
|
| 368 |
+
|
| 369 |
+
dir("build"){
|
| 370 |
+
//build CK
|
| 371 |
+
sh cmd
|
| 372 |
+
//run tests except when NO_CK_BUILD or BUILD_LEGACY_OS are set
|
| 373 |
+
if(!setup_args.contains("NO_CK_BUILD") && !params.BUILD_LEGACY_OS){
|
| 374 |
+
if ((setup_args.contains("gfx9") && params.NINJA_BUILD_TRACE) || params.BUILD_INSTANCES_ONLY){
|
| 375 |
+
sh "/ninjatracing/ninjatracing .ninja_log > ck_build_trace.json"
|
| 376 |
+
sh "/ClangBuildAnalyzer/build/ClangBuildAnalyzer --all . clang_build.log"
|
| 377 |
+
sh "/ClangBuildAnalyzer/build/ClangBuildAnalyzer --analyze clang_build.log > clang_build_analysis.log"
|
| 378 |
+
archiveArtifacts "ck_build_trace.json"
|
| 379 |
+
archiveArtifacts "clang_build_analysis.log"
|
| 380 |
+
// do not run unit tests when building instances only
|
| 381 |
+
if(!params.BUILD_INSTANCES_ONLY){
|
| 382 |
+
sh "ninja check"
|
| 383 |
+
}
|
| 384 |
+
if(params.BUILD_INSTANCES_ONLY){
|
| 385 |
+
// build deb packages
|
| 386 |
+
echo "Build packages"
|
| 387 |
+
sh 'ninja -j64 package'
|
| 388 |
+
archiveArtifacts artifacts: 'composablekernel-dev*.deb'
|
| 389 |
+
sh 'mv composablekernel-dev_*.deb composablekernel-dev_all_targets_1.1.0_amd64.deb'
|
| 390 |
+
stash includes: "composablekernel-dev_all_targets_1.1.0_amd64.deb", name: "packages"
|
| 391 |
+
}
|
| 392 |
+
}
|
| 393 |
+
else{
|
| 394 |
+
// run unit tests unless building library for all targets
|
| 395 |
+
if (!params.BUILD_INSTANCES_ONLY){
|
| 396 |
+
sh "make check"
|
| 397 |
+
}
|
| 398 |
+
}
|
| 399 |
+
}
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
// Only archive from develop
|
| 403 |
+
if (package_build == true && env.BRANCH_NAME == "develop") {
|
| 404 |
+
archiveArtifacts artifacts: "build/*.deb", allowEmptyArchive: true, fingerprint: true
|
| 405 |
+
}
|
| 406 |
+
//check the node gpu architecture
|
| 407 |
+
def arch = check_arch()
|
| 408 |
+
if (params.RUN_CK_TILE_FMHA_TESTS){
|
| 409 |
+
try{
|
| 410 |
+
archiveArtifacts "perf_fmha_*.log"
|
| 411 |
+
if (arch == 1){
|
| 412 |
+
stash includes: "perf_fmha_**_gfx90a.log", name: "perf_fmha_log_gfx90a"
|
| 413 |
+
}
|
| 414 |
+
else if (arch == 2){
|
| 415 |
+
stash includes: "perf_fmha_**_gfx942.log", name: "perf_fmha_log_gfx942"
|
| 416 |
+
}
|
| 417 |
+
}
|
| 418 |
+
catch(Exception err){
|
| 419 |
+
echo "could not locate the requested artifacts: ${err.getMessage()}. will skip the stashing."
|
| 420 |
+
}
|
| 421 |
+
}
|
| 422 |
+
if (params.RUN_CK_TILE_TRANSPOSE_TESTS){
|
| 423 |
+
try{
|
| 424 |
+
archiveArtifacts "perf_transpose_*.log"
|
| 425 |
+
if (arch_type == 1){
|
| 426 |
+
stash includes: "perf_transpose_**_gfx90a.log", name: "perf_transpose_log_gfx90a"
|
| 427 |
+
}
|
| 428 |
+
else if (arch_type == 2){
|
| 429 |
+
stash includes: "perf_transpose_**_gfx942.log", name: "perf_transpose_log_gfx942"
|
| 430 |
+
}
|
| 431 |
+
}
|
| 432 |
+
catch(Exception err){
|
| 433 |
+
echo "could not locate the requested artifacts: ${err.getMessage()}. will skip the stashing."
|
| 434 |
+
}
|
| 435 |
+
}
|
| 436 |
+
if (params.RUN_CK_TILE_GEMM_TESTS){
|
| 437 |
+
try{
|
| 438 |
+
archiveArtifacts "perf_tile_gemm_**.log"
|
| 439 |
+
if (arch == 1){
|
| 440 |
+
stash includes: "perf_tile_gemm_**_gfx90a.log", name: "perf_tile_gemm_log_gfx90a"
|
| 441 |
+
}
|
| 442 |
+
else if (arch == 2){
|
| 443 |
+
stash includes: "perf_tile_gemm_**_gfx942.log", name: "perf_tile_gemm_log_gfx942"
|
| 444 |
+
}
|
| 445 |
+
}
|
| 446 |
+
catch(Exception err){
|
| 447 |
+
echo "could not locate the requested artifacts: ${err.getMessage()}. will skip the stashing."
|
| 448 |
+
}
|
| 449 |
+
}
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
def buildHipClangJob(Map conf=[:]){
    // Build CK inside a ROCm docker container on the current node and run
    // cmake_build() with the given configuration map.
    // conf keys used here: prefixpath (default /opt/rocm), enforce_xnack_on.
    // Returns the docker image handle obtained from getDockerImage().
    show_node_info()

    env.HSA_ENABLE_SDMA=0
    checkout scm
    def prefixpath = conf.get("prefixpath", "/opt/rocm")

    // Jenkins is complaining about the render group
    def dockerOpts
    if ( params.BUILD_INSTANCES_ONLY ){
        // instance-only builds do not need the GPU device nodes
        dockerOpts = "--group-add video --group-add render --cap-add=SYS_PTRACE --security-opt seccomp=unconfined"
    }
    else{
        dockerOpts = "--device=/dev/kfd --device=/dev/dri --group-add video --group-add render --cap-add=SYS_PTRACE --security-opt seccomp=unconfined"
    }
    if (conf.get("enforce_xnack_on", false)) {
        dockerOpts = dockerOpts + " --env HSA_XNACK=1 "
    }
    // NOTE(review): dockerArgs is computed but never used in this function;
    // presumably getDockerImage() assembles its own build args — confirm and
    // remove if it is indeed dead.
    def dockerArgs = "--build-arg PREFIX=${prefixpath} --build-arg CK_SCCACHE='${env.CK_SCCACHE}' --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' "
    if (params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline" || params.COMPILER_COMMIT != ""){
        dockerOpts = dockerOpts + " --env HIP_CLANG_PATH='/llvm-project/build/bin' "
    }
    // BUGFIX: sh(returnStdout: true) includes the trailing newline, which
    // previously got embedded into the docker arguments string; trim it.
    def video_id = sh(returnStdout: true, script: 'getent group video | cut -d: -f3').trim()
    def render_id = sh(returnStdout: true, script: 'getent group render | cut -d: -f3').trim()
    dockerOpts = dockerOpts + " --group-add=${video_id} --group-add=${render_id} "
    echo "Docker flags: ${dockerOpts}"

    def variant = env.STAGE_NAME
    def image
    def retimage
    (retimage, image) = getDockerImage(conf)

    gitStatusWrapper(credentialsId: "${env.ck_git_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel') {
        withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
            timeout(time: 20, unit: 'HOURS')
            {
                cmake_build(conf)
            }
        }
    }
    return retimage
}
|
| 494 |
+
|
| 495 |
+
def reboot(){
    // Trigger the helper job that reboots this worker node. The reboot job's
    // outcome must not affect the calling pipeline, hence propagate: false.
    def rebootParams = [string(name: 'server', value: "${env.NODE_NAME}")]
    build job: 'reboot-slaves', propagate: false, parameters: rebootParams
}
|
| 498 |
+
|
| 499 |
+
def buildHipClangJobAndReboot(Map conf=[:]){
    // Run buildHipClangJob() and, regardless of success or failure, reboot the
    // node afterwards unless the caller passed no_reboot: true in conf.
    try{
        buildHipClangJob(conf)
    }
    catch(e){
        echo "throwing error exception for the stage"
        echo 'Exception occurred: ' + e.toString()
        throw e
    }
    finally{
        def skipReboot = conf.get("no_reboot", false)
        if (!skipReboot){
            reboot()
        }
    }
}
|
| 514 |
+
|
| 515 |
+
def Build_CK(Map conf=[:]){
    // Full CK build-and-test driver for one node:
    //  1. sanity-check that a GPU is visible inside the container (rocminfo),
    //  2. build CK via cmake_build(conf),
    //  3. optionally run inductor codegen tests, build deb packages (full QA on
    //     gfx942), run the per-architecture performance tests and stash their
    //     logs for the master node, and smoke-build hipTensor on gfx90a.
    // conf keys used here: prefixpath (default /opt/rocm), enforce_xnack_on.
    // Returns the docker image handle obtained from getDockerImage().
    show_node_info()

    env.HSA_ENABLE_SDMA=0
    env.DOCKER_BUILDKIT=1
    checkout scm
    def prefixpath = conf.get("prefixpath", "/opt/rocm")

    // Jenkins is complaining about the render group
    def dockerOpts="--device=/dev/kfd --device=/dev/dri --group-add video --group-add render --cap-add=SYS_PTRACE --security-opt seccomp=unconfined"
    if (conf.get("enforce_xnack_on", false)) {
        dockerOpts = dockerOpts + " --env HSA_XNACK=1 "
    }
    // NOTE(review): dockerArgs is computed but never used in this function;
    // presumably getDockerImage() assembles its own build args — confirm and
    // remove if it is indeed dead.
    def dockerArgs = "--build-arg PREFIX=${prefixpath} --build-arg compiler_version='${params.COMPILER_VERSION}' --build-arg compiler_commit='${params.COMPILER_COMMIT}' --build-arg ROCMVERSION='${params.ROCMVERSION}' "
    if (params.COMPILER_VERSION == "amd-staging" || params.COMPILER_VERSION == "amd-mainline" || params.COMPILER_COMMIT != ""){
        dockerOpts = dockerOpts + " --env HIP_CLANG_PATH='/llvm-project/build/bin' "
    }
    if(params.BUILD_LEGACY_OS){
        dockerOpts = dockerOpts + " --env LD_LIBRARY_PATH='/opt/Python-3.8.13/lib' "
    }
    // BUGFIX: sh(returnStdout: true) includes the trailing newline, which
    // previously got embedded into the docker arguments string; trim it.
    def video_id = sh(returnStdout: true, script: 'getent group video | cut -d: -f3').trim()
    def render_id = sh(returnStdout: true, script: 'getent group render | cut -d: -f3').trim()
    dockerOpts = dockerOpts + " --group-add=${video_id} --group-add=${render_id} "
    echo "Docker flags: ${dockerOpts}"

    def variant = env.STAGE_NAME
    def image
    def retimage

    gitStatusWrapper(credentialsId: "${env.ck_git_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel') {
        try {
            (retimage, image) = getDockerImage(conf)
            withDockerContainer(image: image, args: dockerOpts) {
                timeout(time: 2, unit: 'MINUTES'){
                    // fail fast if this node has no visible GPU
                    sh 'rocminfo | tee rocminfo.log'
                    if ( !runShell('grep -n "gfx" rocminfo.log') ){
                        throw new Exception ("GPU not found")
                    }
                    else{
                        echo "GPU is OK"
                    }
                }
            }
        }
        catch (org.jenkinsci.plugins.workflow.steps.FlowInterruptedException e){
            echo "The job was cancelled or aborted"
            throw e
        }
        withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
            timeout(time: 20, unit: 'HOURS')
            {
                //check whether to run performance tests on this node
                def arch = check_arch()
                cmake_build(conf)
                if ( params.RUN_INDUCTOR_TESTS && !params.BUILD_LEGACY_OS && arch == 1 ){
                    echo "Run inductor codegen tests"
                    sh """
                        python3 -m venv ${env.WORKSPACE}
                        . ${env.WORKSPACE}/bin/activate
                        python3 -m pip install pytest build setuptools setuptools_scm
                        python3 -m pip install .
                        python3 -m pytest python/test/test_gen_instances.py
                    """
                }
                dir("build"){
                    if (params.RUN_FULL_QA && arch == 2 ){
                        // build deb packages
                        echo "Build packages"
                        sh 'make -j package'
                        archiveArtifacts artifacts: 'composablekernel*.deb'
                        // normalize package names so downstream upload paths are stable
                        sh 'mv composablekernel-ckprofiler_*.deb composablekernel-ckprofiler_1.1.0_amd64.deb'
                        sh 'mv composablekernel-dev_*.deb composablekernel-dev_1.1.0_amd64.deb'
                        sh 'mv composablekernel-examples_*.deb composablekernel-examples_1.1.0_amd64.deb'
                        sh 'mv composablekernel-tests_*.deb composablekernel-tests_1.1.0_amd64.deb'
                        stash includes: "composablekernel-**.deb", name: "packages"
                    }
                }
                // run performance tests, stash the logs, results will be processed on the master node
                dir("script"){
                    if (params.RUN_PERFORMANCE_TESTS){
                        if (params.RUN_FULL_QA && arch == 1){
                            // run full tests on gfx90a
                            echo "Run full performance tests"
                            sh "./run_full_performance_tests.sh 0 QA_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME}"
                            archiveArtifacts "perf_gemm.log"
                            archiveArtifacts "perf_resnet50_N256.log"
                            archiveArtifacts "perf_resnet50_N4.log"
                            archiveArtifacts "perf_batched_gemm.log"
                            archiveArtifacts "perf_grouped_gemm.log"
                            archiveArtifacts "perf_grouped_conv_fwd.log"
                            archiveArtifacts "perf_grouped_conv_bwd_data.log"
                            archiveArtifacts "perf_grouped_conv_bwd_weight.log"
                            archiveArtifacts "perf_gemm_bilinear.log"
                            archiveArtifacts "perf_reduction.log"
                            archiveArtifacts "perf_splitK_gemm.log"
                            archiveArtifacts "perf_onnx_gemm.log"
                            archiveArtifacts "perf_mixed_gemm.log"
                            stash includes: "perf_**.log", name: "perf_log"
                        }
                        else if ( arch == 1 ){
                            // run standard tests on gfx90a
                            echo "Run performance tests"
                            sh "./run_performance_tests.sh 0 CI_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME}"
                            archiveArtifacts "perf_gemm.log"
                            archiveArtifacts "perf_onnx_gemm.log"
                            archiveArtifacts "perf_resnet50_N256.log"
                            archiveArtifacts "perf_resnet50_N4.log"
                            stash includes: "perf_**.log", name: "perf_log"
                        }
                        // disable performance tests on gfx1030 for now.
                        //else if ( arch == 3){
                        //    // run basic tests on gfx1030
                        //    echo "Run gemm performance tests"
                        //    sh "./run_gemm_performance_tests.sh 0 CI_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME} gfx10"
                        //    archiveArtifacts "perf_onnx_gemm_gfx10.log"
                        //    stash includes: "perf_onnx_gemm_gfx10.log", name: "perf_log_gfx10"
                        //}
                        else if ( arch == 4){
                            // run basic tests on gfx11
                            echo "Run gemm performance tests"
                            sh "./run_gemm_performance_tests.sh 0 CI_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME} gfx11"
                            archiveArtifacts "perf_onnx_gemm_gfx11.log"
                            stash includes: "perf_onnx_gemm_gfx11.log", name: "perf_log_gfx11"
                        }
                        else if ( arch == 5 ){
                            // run basic tests on gfx12
                            echo "Run gemm performance tests"
                            sh "./run_gemm_performance_tests.sh 0 CI_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME} gfx12"
                            archiveArtifacts "perf_onnx_gemm_gfx12.log"
                            stash includes: "perf_onnx_gemm_gfx12.log", name: "perf_log_gfx12"
                        }
                        else if ( arch == 6 ){
                            // run basic tests on gfx908
                            echo "Run performance tests"
                            sh "./run_gemm_performance_tests.sh 0 CI_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME} gfx908"
                            archiveArtifacts "perf_onnx_gemm_gfx908.log"
                            stash includes: "perf_onnx_gemm_gfx908.log", name: "perf_log_gfx908"
                        }
                        else if ( arch == 7 ){
                            // run basic tests on gfx950
                            echo "Run performance tests"
                            sh "./run_gemm_performance_tests.sh 0 CI_${params.COMPILER_VERSION} ${env.BRANCH_NAME} ${NODE_NAME} gfx950"
                            archiveArtifacts "perf_onnx_gemm_gfx950.log"
                            stash includes: "perf_onnx_gemm_gfx950.log", name: "perf_log_gfx950"
                        }
                    }
                }
                if (params.hipTensor_test && arch == 1 ){
                    // build and test hipTensor on gfx90a node
                    sh """#!/bin/bash
                        rm -rf "${params.hipTensor_branch}".zip
                        rm -rf hipTensor-"${params.hipTensor_branch}"
                        wget https://github.com/ROCm/hipTensor/archive/refs/heads/"${params.hipTensor_branch}".zip
                        unzip -o "${params.hipTensor_branch}".zip
                    """
                    dir("hipTensor-${params.hipTensor_branch}"){
                        sh """#!/bin/bash
                            mkdir -p build
                            ls -ltr
                            CC=hipcc CXX=hipcc cmake -Bbuild . -D CMAKE_PREFIX_PATH="${env.WORKSPACE}/install"
                            cmake --build build -- -j
                            ctest --test-dir build
                        """
                    }
                }
            }
        }
    }
    return retimage
}
|
| 685 |
+
|
| 686 |
+
def Build_CK_and_Reboot(Map conf=[:]){
    // Run Build_CK() and, regardless of success or failure, reboot the node
    // afterwards unless the caller passed no_reboot: true in conf.
    try{
        Build_CK(conf)
    }
    catch(e){
        echo "throwing error exception while building CK"
        echo 'Exception occurred: ' + e.toString()
        throw e
    }
    finally{
        def skipReboot = conf.get("no_reboot", false)
        if (!skipReboot){
            reboot()
        }
    }
}
|
| 701 |
+
|
| 702 |
+
def process_results(Map conf=[:]){
    // Master-node result processing: pull a known-good container image, then
    // unstash the logs/packages produced by the build nodes and either upload
    // the deb packages or run process_perf_data.sh on the performance logs.
    env.HSA_ENABLE_SDMA=0
    checkout scm
    //use older image that has user jenkins
    def image = "rocm/composable_kernel:ck_ub22.04_rocm6.3"
    def prefixpath = "/opt/rocm"

    // Jenkins is complaining about the render group
    def dockerOpts="--cap-add=SYS_PTRACE --security-opt seccomp=unconfined"
    if (conf.get("enforce_xnack_on", false)) {
        dockerOpts = dockerOpts + " --env HSA_XNACK=1 "
    }

    def variant = env.STAGE_NAME
    def retimage

    gitStatusWrapper(credentialsId: "${env.ck_git_creds}", gitHubContext: "Jenkins - ${variant}", account: 'ROCm', repo: 'composable_kernel') {
        try
        {
            echo "Pulling image: ${image}"
            retimage = docker.image("${image}")
            withDockerRegistry([ credentialsId: "ck_docker_cred", url: "" ]) {
                retimage.pull()
            }
        }
        catch(Exception ex)
        {
            error "Unable to locate image: ${image}"
        }
    }

    withDockerContainer(image: image, args: dockerOpts + ' -v=/var/jenkins/:/var/jenkins') {
        timeout(time: 15, unit: 'MINUTES'){
            try{
                dir("script"){
                    // Unstash the ck_tile perf logs for each enabled test family.
                    // A missing stash is tolerated: the producing stage may have
                    // been skipped or failed to generate logs.
                    def tilePerfLogs = [
                        [ params.RUN_CK_TILE_FMHA_TESTS,      ["perf_fmha_log_gfx942", "perf_fmha_log_gfx90a"],           "FMHA" ],
                        [ params.RUN_CK_TILE_TRANSPOSE_TESTS, ["perf_transpose_log_gfx942", "perf_transpose_log_gfx90a"], "Transpose" ],
                        [ params.RUN_CK_TILE_GEMM_TESTS,      ["perf_tile_gemm_log_gfx942", "perf_tile_gemm_log_gfx90a"], "GEMM" ],
                    ]
                    for (entry in tilePerfLogs){
                        def enabled = entry[0]
                        def stashNames = entry[1]
                        def label = entry[2]
                        if (enabled){
                            try{
                                for (stashName in stashNames){
                                    unstash stashName
                                }
                            }
                            catch(Exception err){
                                echo "could not locate the ${label} performance logs: ${err.getMessage()}."
                            }
                        }
                    }
                    if (params.RUN_FULL_QA || params.BUILD_INSTANCES_ONLY){
                        // unstash deb packages
                        unstash "packages"
                        sh "sshpass -p ${env.ck_deb_pw} scp -o StrictHostKeyChecking=no composablekernel-*.deb ${env.ck_deb_user}@${env.ck_deb_ip}:/var/www/html/composable_kernel/"
                    }
                    else{
                        // unstash perf files to master
                        unstash "perf_log"
                        try{
                            unstash "perf_log_gfx11"
                            unstash "perf_log_gfx12"
                        }
                        catch(Exception err){
                            echo "could not locate the GEMM gfx11/gfx12 performance logs: ${err.getMessage()}."
                        }
                        sh "./process_perf_data.sh"
                    }
                }
            }
            catch(e){
                echo "Throwing error exception while processing performance test results"
                echo 'Exception occurred: ' + e.toString()
                throw e
            }
            finally{
                echo "Finished processing performance test results"
            }
        }
    }
}
|
| 794 |
+
|
| 795 |
+
//launch develop branch daily jobs
|
| 796 |
+
// Daily parameterized-cron schedules apply only to the develop branch; every
// other branch gets an empty trigger spec (no scheduled runs).
if (BRANCH_NAME == "develop") {
    CRON_SETTINGS = '''0 23 * * * % RUN_FULL_QA=true;DISABLE_DL_KERNELS=true;RUN_CK_TILE_FMHA_TESTS=true;RUN_CK_TILE_TRANSPOSE_TESTS=true;RUN_CK_TILE_GEMM_TESTS=true;RUN_TILE_ENGINE_GEMM_TESTS=false
0 21 * * * % RUN_GROUPED_CONV_LARGE_CASES_TESTS=true;hipTensor_test=true;BUILD_GFX908=true;BUILD_GFX950=true
0 19 * * * % BUILD_DOCKER=true;COMPILER_VERSION=amd-staging;BUILD_COMPILER=/llvm-project/build/bin/clang++;USE_SCCACHE=false;NINJA_BUILD_TRACE=true
0 17 * * * % BUILD_DOCKER=true;COMPILER_VERSION=amd-mainline;BUILD_COMPILER=/llvm-project/build/bin/clang++;USE_SCCACHE=false;NINJA_BUILD_TRACE=true
0 15 * * * % BUILD_INSTANCES_ONLY=true;USE_SCCACHE=false;NINJA_BUILD_TRACE=true
0 13 * * * % BUILD_LEGACY_OS=true;USE_SCCACHE=false;RUN_PERFORMANCE_TESTS=false'''
} else {
    CRON_SETTINGS = ""
}
|
| 802 |
+
|
| 803 |
+
pipeline {
|
| 804 |
+
agent none
|
| 805 |
+
triggers {
|
| 806 |
+
parameterizedCron(CRON_SETTINGS)
|
| 807 |
+
}
|
| 808 |
+
options {
|
| 809 |
+
parallelsAlwaysFailFast()
|
| 810 |
+
}
|
| 811 |
+
parameters {
|
| 812 |
+
booleanParam(
|
| 813 |
+
name: "BUILD_DOCKER",
|
| 814 |
+
defaultValue: false,
|
| 815 |
+
description: "Force building docker image (default: false), set to true if docker image needs to be updated.")
|
| 816 |
+
string(
|
| 817 |
+
name: 'USE_CUSTOM_DOCKER',
|
| 818 |
+
defaultValue: '',
|
| 819 |
+
description: 'If you want to use a custom docker image, please specify it here (default: leave blank).')
|
| 820 |
+
string(
|
| 821 |
+
name: 'ROCMVERSION',
|
| 822 |
+
defaultValue: '6.4.1',
|
| 823 |
+
description: 'Specify which ROCM version to use: 6.4.1 (default).')
|
| 824 |
+
string(
|
| 825 |
+
name: 'COMPILER_VERSION',
|
| 826 |
+
defaultValue: '',
|
| 827 |
+
description: 'Specify which version of compiler to use: release, amd-staging, amd-mainline, or leave blank (default).')
|
| 828 |
+
string(
|
| 829 |
+
name: 'COMPILER_COMMIT',
|
| 830 |
+
defaultValue: '',
|
| 831 |
+
description: 'Specify which commit of compiler branch to use: leave blank to use the latest commit (default), or use some specific commit of llvm-project branch.')
|
| 832 |
+
string(
|
| 833 |
+
name: 'BUILD_COMPILER',
|
| 834 |
+
defaultValue: '/opt/rocm/llvm/bin/clang++',
|
| 835 |
+
description: 'Build CK with /opt/rocm/bin/hipcc, /llvm-project/build/bin/clang++, or with /opt/rocm/llvm/bin/clang++ (default).')
|
| 836 |
+
booleanParam(
|
| 837 |
+
name: "RUN_FULL_QA",
|
| 838 |
+
defaultValue: false,
|
| 839 |
+
description: "Select whether to run small set of performance tests (default) or full QA")
|
| 840 |
+
booleanParam(
|
| 841 |
+
name: "DISABLE_DL_KERNELS",
|
| 842 |
+
defaultValue: false,
|
| 843 |
+
description: "Select whether to build DL kernels (default: OFF)")
|
| 844 |
+
booleanParam(
|
| 845 |
+
name: "hipTensor_test",
|
| 846 |
+
defaultValue: false,
|
| 847 |
+
description: "Use the CK build to verify hipTensor build and tests (default: OFF)")
|
| 848 |
+
string(
|
| 849 |
+
name: 'hipTensor_branch',
|
| 850 |
+
defaultValue: 'mainline',
|
| 851 |
+
description: 'Specify which branch of hipTensor to use (default: mainline)')
|
| 852 |
+
booleanParam(
|
| 853 |
+
name: "USE_SCCACHE",
|
| 854 |
+
defaultValue: true,
|
| 855 |
+
description: "Use the sccache for building CK (default: ON)")
|
| 856 |
+
booleanParam(
|
| 857 |
+
name: "RUN_CPPCHECK",
|
| 858 |
+
defaultValue: false,
|
| 859 |
+
description: "Run the cppcheck static analysis (default: OFF)")
|
| 860 |
+
booleanParam(
|
| 861 |
+
name: "RUN_PERFORMANCE_TESTS",
|
| 862 |
+
defaultValue: true,
|
| 863 |
+
description: "Run the performance tests (default: ON)")
|
| 864 |
+
booleanParam(
|
| 865 |
+
name: "RUN_GROUPED_CONV_LARGE_CASES_TESTS",
|
| 866 |
+
defaultValue: false,
|
| 867 |
+
description: "Run the grouped conv large cases tests (default: OFF)")
|
| 868 |
+
booleanParam(
|
| 869 |
+
name: "RUN_CODEGEN_TESTS",
|
| 870 |
+
defaultValue: true,
|
| 871 |
+
description: "Run codegen tests (default: ON)")
|
| 872 |
+
booleanParam(
|
| 873 |
+
name: "RUN_CK_TILE_FMHA_TESTS",
|
| 874 |
+
defaultValue: false,
|
| 875 |
+
description: "Run the ck_tile FMHA tests (default: OFF)")
|
| 876 |
+
booleanParam(
|
| 877 |
+
name: "RUN_CK_TILE_TRANSPOSE_TESTS",
|
| 878 |
+
defaultValue: false,
|
| 879 |
+
description: "Run the ck_tile Transpose tests (default: OFF)")
|
| 880 |
+
booleanParam(
|
| 881 |
+
name: "RUN_CK_TILE_GEMM_TESTS",
|
| 882 |
+
defaultValue: false,
|
| 883 |
+
description: "Run the ck_tile GEMM tests (default: OFF)")
|
| 884 |
+
booleanParam(
|
| 885 |
+
name: "RUN_TILE_ENGINE_GEMM_TESTS",
|
| 886 |
+
defaultValue: false,
|
| 887 |
+
description: "Run the tile_engine_gemm tests (default: OFF)")
|
| 888 |
+
booleanParam(
|
| 889 |
+
name: "BUILD_INSTANCES_ONLY",
|
| 890 |
+
defaultValue: false,
|
| 891 |
+
description: "Test building instances for various architectures simultaneously (default: OFF)")
|
| 892 |
+
booleanParam(
|
| 893 |
+
name: "BUILD_GFX908",
|
| 894 |
+
defaultValue: false,
|
| 895 |
+
description: "Build CK and run tests on gfx908 (default: OFF)")
|
| 896 |
+
booleanParam(
|
| 897 |
+
name: "BUILD_GFX950",
|
| 898 |
+
defaultValue: false,
|
| 899 |
+
description: "Build CK and run tests on gfx950 (default: OFF)")
|
| 900 |
+
booleanParam(
|
| 901 |
+
name: "BUILD_GFX12",
|
| 902 |
+
defaultValue: true,
|
| 903 |
+
description: "Build CK and run tests on gfx12 (default: ON)")
|
| 904 |
+
booleanParam(
|
| 905 |
+
name: "NINJA_BUILD_TRACE",
|
| 906 |
+
defaultValue: false,
|
| 907 |
+
description: "Generate a ninja build trace (default: OFF)")
|
| 908 |
+
booleanParam(
|
| 909 |
+
name: "BUILD_LEGACY_OS",
|
| 910 |
+
defaultValue: false,
|
| 911 |
+
description: "Try building CK with legacy OS dockers: RHEL8 and SLES15 (default: OFF)")
|
| 912 |
+
booleanParam(
|
| 913 |
+
name: "RUN_INDUCTOR_TESTS",
|
| 914 |
+
defaultValue: true,
|
| 915 |
+
description: "Run inductor codegen tests (default: ON)")
|
| 916 |
+
}
|
| 917 |
+
environment{
|
| 918 |
+
dbuser = "${dbuser}"
|
| 919 |
+
dbpassword = "${dbpassword}"
|
| 920 |
+
dbsship = "${dbsship}"
|
| 921 |
+
dbsshport = "${dbsshport}"
|
| 922 |
+
dbsshuser = "${dbsshuser}"
|
| 923 |
+
dbsshpassword = "${dbsshpassword}"
|
| 924 |
+
ck_git_creds = "${ck_git_creds}"
|
| 925 |
+
gerrit_cred="${gerrit_cred}"
|
| 926 |
+
DOCKER_BUILDKIT = "1"
|
| 927 |
+
}
|
| 928 |
+
stages{
|
| 929 |
+
stage("Build Docker"){
|
| 930 |
+
parallel{
|
| 931 |
+
stage('Docker /opt/rocm'){
|
| 932 |
+
agent{ label rocmnode("nogpu") }
|
| 933 |
+
steps{
|
| 934 |
+
buildDocker('/opt/rocm')
|
| 935 |
+
cleanWs()
|
| 936 |
+
}
|
| 937 |
+
}
|
| 938 |
+
}
|
| 939 |
+
}
|
| 940 |
+
stage("Static checks") {
|
| 941 |
+
parallel{
|
| 942 |
+
stage('Clang Format and Cppcheck') {
|
| 943 |
+
when {
|
| 944 |
+
beforeAgent true
|
| 945 |
+
expression { params.RUN_CPPCHECK.toBoolean() }
|
| 946 |
+
}
|
| 947 |
+
agent{ label rocmnode("nogpu") }
|
| 948 |
+
environment{
|
| 949 |
+
setup_args = "NO_CK_BUILD"
|
| 950 |
+
execute_cmd = "find .. -not -path \'*.git*\' -iname \'*.h\' \
|
| 951 |
+
-o -not -path \'*.git*\' -iname \'*.hpp\' \
|
| 952 |
+
-o -not -path \'*.git*\' -iname \'*.cpp\' \
|
| 953 |
+
-o -iname \'*.h.in\' \
|
| 954 |
+
-o -iname \'*.hpp.in\' \
|
| 955 |
+
-o -iname \'*.cpp.in\' \
|
| 956 |
+
-o -iname \'*.cl\' \
|
| 957 |
+
| grep -v 'build/' \
|
| 958 |
+
| xargs -n 1 -P 1 -I{} -t sh -c \'clang-format-12 -style=file {} | diff - {}\' && \
|
| 959 |
+
/cppcheck/build/bin/cppcheck ../* -v -j \$(nproc) -I ../include -I ../profiler/include -I ../library/include \
|
| 960 |
+
-D CK_ENABLE_FP64 -D CK_ENABLE_FP32 -D CK_ENABLE_FP16 -D CK_ENABLE_FP8 -D CK_ENABLE_BF16 -D CK_ENABLE_BF8 -D CK_ENABLE_INT8 \
|
| 961 |
+
-D __gfx908__ -D __gfx90a__ -D __gfx942__ -D __gfx1030__ -D __gfx1100__ -D __gfx1101__ -D __gfx1102__ \
|
| 962 |
+
-U __gfx803__ -U __gfx900__ -U __gfx906__ -U CK_EXPERIMENTAL_BIT_INT_EXTENSION_INT4 \
|
| 963 |
+
--file-filter=*.cpp --force --enable=all --output-file=ck_cppcheck.log"
|
| 964 |
+
}
|
| 965 |
+
steps{
|
| 966 |
+
buildHipClangJobAndReboot(setup_args:setup_args, setup_cmd: "", build_cmd: "", execute_cmd: execute_cmd, no_reboot:true)
|
| 967 |
+
archiveArtifacts "build/ck_cppcheck.log"
|
| 968 |
+
cleanWs()
|
| 969 |
+
}
|
| 970 |
+
}
|
| 971 |
+
stage('Clang Format') {
|
| 972 |
+
when {
|
| 973 |
+
beforeAgent true
|
| 974 |
+
expression { !params.RUN_CPPCHECK.toBoolean() }
|
| 975 |
+
}
|
| 976 |
+
agent{ label rocmnode("nogpu") }
|
| 977 |
+
environment{
|
| 978 |
+
setup_args = "NO_CK_BUILD"
|
| 979 |
+
execute_cmd = "find .. -not -path \'*.git*\' -iname \'*.h\' \
|
| 980 |
+
-o -not -path \'*.git*\' -iname \'*.hpp\' \
|
| 981 |
+
-o -not -path \'*.git*\' -iname \'*.cpp\' \
|
| 982 |
+
-o -iname \'*.h.in\' \
|
| 983 |
+
-o -iname \'*.hpp.in\' \
|
| 984 |
+
-o -iname \'*.cpp.in\' \
|
| 985 |
+
-o -iname \'*.cl\' \
|
| 986 |
+
| grep -v 'build/' \
|
| 987 |
+
| xargs -n 1 -P 1 -I{} -t sh -c \'clang-format-12 -style=file {} | diff - {}\'"
|
| 988 |
+
}
|
| 989 |
+
steps{
|
| 990 |
+
buildHipClangJobAndReboot(setup_args:setup_args, setup_cmd: "", build_cmd: "", execute_cmd: execute_cmd, no_reboot:true)
|
| 991 |
+
cleanWs()
|
| 992 |
+
}
|
| 993 |
+
}
|
| 994 |
+
}
|
| 995 |
+
}
|
| 996 |
+
stage("Run Grouped Conv Large Case Tests")
|
| 997 |
+
{
|
| 998 |
+
parallel
|
| 999 |
+
{
|
| 1000 |
+
stage("Run Grouped Conv Large Case Tests on gfx90a")
|
| 1001 |
+
{
|
| 1002 |
+
when {
|
| 1003 |
+
beforeAgent true
|
| 1004 |
+
expression { params.RUN_GROUPED_CONV_LARGE_CASES_TESTS.toBoolean() }
|
| 1005 |
+
}
|
| 1006 |
+
agent{ label rocmnode("gfx90a")}
|
| 1007 |
+
environment{
|
| 1008 |
+
setup_args = "NO_CK_BUILD"
|
| 1009 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx90a && \
|
| 1010 |
+
make -j64 test_grouped_convnd_fwd_large_cases_xdl test_grouped_convnd_bwd_data_xdl_large_cases && \
|
| 1011 |
+
./bin/test_grouped_convnd_fwd_large_cases_xdl && ./bin/test_grouped_convnd_bwd_data_xdl_large_cases"""
|
| 1012 |
+
}
|
| 1013 |
+
steps{
|
| 1014 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1015 |
+
cleanWs()
|
| 1016 |
+
}
|
| 1017 |
+
}
|
| 1018 |
+
}
|
| 1019 |
+
}
|
| 1020 |
+
stage("Run Codegen Tests")
|
| 1021 |
+
{
|
| 1022 |
+
parallel
|
| 1023 |
+
{
|
| 1024 |
+
stage("Run Codegen Tests on gfx90a")
|
| 1025 |
+
{
|
| 1026 |
+
when {
|
| 1027 |
+
beforeAgent true
|
| 1028 |
+
expression { params.RUN_CODEGEN_TESTS.toBoolean() }
|
| 1029 |
+
}
|
| 1030 |
+
agent{ label rocmnode("gfx90a")}
|
| 1031 |
+
environment{
|
| 1032 |
+
setup_args = "NO_CK_BUILD"
|
| 1033 |
+
execute_args = """ CXX=/opt/rocm/llvm/bin/clang++ cmake ../codegen && \
|
| 1034 |
+
make -j64 check"""
|
| 1035 |
+
}
|
| 1036 |
+
steps{
|
| 1037 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1038 |
+
cleanWs()
|
| 1039 |
+
}
|
| 1040 |
+
}
|
| 1041 |
+
}
|
| 1042 |
+
}
|
| 1043 |
+
stage("Run CK_TILE_FMHA Tests")
|
| 1044 |
+
{
|
| 1045 |
+
parallel
|
| 1046 |
+
{
|
| 1047 |
+
stage("Run CK_TILE_FMHA Tests on gfx90a")
|
| 1048 |
+
{
|
| 1049 |
+
when {
|
| 1050 |
+
beforeAgent true
|
| 1051 |
+
expression { params.RUN_CK_TILE_FMHA_TESTS.toBoolean() }
|
| 1052 |
+
}
|
| 1053 |
+
agent{ label rocmnode("gfx90a") }
|
| 1054 |
+
environment{
|
| 1055 |
+
setup_args = "NO_CK_BUILD"
|
| 1056 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx90a && \
|
| 1057 |
+
make -j64 tile_example_fmha_fwd tile_example_fmha_bwd && \
|
| 1058 |
+
cd ../ &&
|
| 1059 |
+
example/ck_tile/01_fmha/script/run_full_test.sh "CI_${params.COMPILER_VERSION}" "${env.BRANCH_NAME}" "${NODE_NAME}" gfx90a """
|
| 1060 |
+
}
|
| 1061 |
+
steps{
|
| 1062 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1063 |
+
cleanWs()
|
| 1064 |
+
}
|
| 1065 |
+
}
|
| 1066 |
+
stage("Run CK_TILE_FMHA Tests on gfx942")
|
| 1067 |
+
{
|
| 1068 |
+
when {
|
| 1069 |
+
beforeAgent true
|
| 1070 |
+
expression { params.RUN_CK_TILE_FMHA_TESTS.toBoolean() }
|
| 1071 |
+
}
|
| 1072 |
+
agent{ label rocmnode("gfx942") }
|
| 1073 |
+
environment{
|
| 1074 |
+
setup_args = "NO_CK_BUILD"
|
| 1075 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx942 && \
|
| 1076 |
+
make -j64 tile_example_fmha_fwd tile_example_fmha_bwd && \
|
| 1077 |
+
cd ../ &&
|
| 1078 |
+
example/ck_tile/01_fmha/script/run_full_test.sh "CI_${params.COMPILER_VERSION}" "${env.BRANCH_NAME}" "${NODE_NAME}" gfx942 """
|
| 1079 |
+
}
|
| 1080 |
+
steps{
|
| 1081 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1082 |
+
cleanWs()
|
| 1083 |
+
}
|
| 1084 |
+
}
|
| 1085 |
+
}
|
| 1086 |
+
}
|
| 1087 |
+
stage("Run CK_TILE_TRANSPOSE Tests")
|
| 1088 |
+
{
|
| 1089 |
+
parallel
|
| 1090 |
+
{
|
| 1091 |
+
stage("Run CK_TILE_TRANSPOSE Tests on gfx90a")
|
| 1092 |
+
{
|
| 1093 |
+
when {
|
| 1094 |
+
beforeAgent true
|
| 1095 |
+
expression { params.RUN_CK_TILE_TRANSPOSE_TESTS.toBoolean() }
|
| 1096 |
+
}
|
| 1097 |
+
agent{ label rocmnode("gfx90a") }
|
| 1098 |
+
environment{
|
| 1099 |
+
setup_args = "NO_CK_BUILD"
|
| 1100 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx90a && \
|
| 1101 |
+
make -j64 tile_example_batched_transpose && \
|
| 1102 |
+
cd ../ &&
|
| 1103 |
+
example/ck_tile/35_batched_transpose/script/run_full_test.sh "CI_${params.COMPILER_VERSION}" "${env.BRANCH_NAME}" "${NODE_NAME}" gfx90a """
|
| 1104 |
+
}
|
| 1105 |
+
steps{
|
| 1106 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1107 |
+
cleanWs()
|
| 1108 |
+
}
|
| 1109 |
+
}
|
| 1110 |
+
stage("Run CK_TILE_TRANSPOSE Tests on gfx942")
|
| 1111 |
+
{
|
| 1112 |
+
when {
|
| 1113 |
+
beforeAgent true
|
| 1114 |
+
expression { params.RUN_CK_TILE_TRANSPOSE_TESTS.toBoolean() }
|
| 1115 |
+
}
|
| 1116 |
+
agent{ label rocmnode("gfx942") }
|
| 1117 |
+
environment{
|
| 1118 |
+
setup_args = "NO_CK_BUILD"
|
| 1119 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx942 && \
|
| 1120 |
+
make -j64 tile_example_batched_transpose && \
|
| 1121 |
+
cd ../ &&
|
| 1122 |
+
example/ck_tile/35_batched_transpose/script/run_full_test.sh "CI_${params.COMPILER_VERSION}" "${env.BRANCH_NAME}" "${NODE_NAME}" gfx942 """
|
| 1123 |
+
}
|
| 1124 |
+
steps{
|
| 1125 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1126 |
+
cleanWs()
|
| 1127 |
+
}
|
| 1128 |
+
}
|
| 1129 |
+
}
|
| 1130 |
+
}
|
| 1131 |
+
stage("Run CK_TILE_GEMM Tests")
|
| 1132 |
+
{
|
| 1133 |
+
parallel
|
| 1134 |
+
{
|
| 1135 |
+
stage("Run CK_TILE_GEMM Tests on gfx90a")
|
| 1136 |
+
{
|
| 1137 |
+
when {
|
| 1138 |
+
beforeAgent true
|
| 1139 |
+
expression { params.RUN_CK_TILE_GEMM_TESTS.toBoolean() }
|
| 1140 |
+
}
|
| 1141 |
+
agent{ label rocmnode("gfx90a") }
|
| 1142 |
+
environment{
|
| 1143 |
+
setup_args = "NO_CK_BUILD"
|
| 1144 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx90a && \
|
| 1145 |
+
make -j64 tile_example_gemm_universal && \
|
| 1146 |
+
cd ../ &&
|
| 1147 |
+
example/ck_tile/03_gemm/script/run_full_test.sh "CI_${params.COMPILER_VERSION}" "${env.BRANCH_NAME}" "${NODE_NAME}" gfx90a """
|
| 1148 |
+
}
|
| 1149 |
+
steps{
|
| 1150 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1151 |
+
cleanWs()
|
| 1152 |
+
}
|
| 1153 |
+
}
|
| 1154 |
+
stage("Run CK_TILE_GEMM Tests on gfx942")
|
| 1155 |
+
{
|
| 1156 |
+
when {
|
| 1157 |
+
beforeAgent true
|
| 1158 |
+
expression { params.RUN_CK_TILE_GEMM_TESTS.toBoolean() }
|
| 1159 |
+
}
|
| 1160 |
+
agent{ label rocmnode("gfx942") }
|
| 1161 |
+
environment{
|
| 1162 |
+
setup_args = "NO_CK_BUILD"
|
| 1163 |
+
execute_args = """ ../script/cmake-ck-dev.sh ../ gfx942 && \
|
| 1164 |
+
make -j64 tile_example_gemm_universal && \
|
| 1165 |
+
cd ../ &&
|
| 1166 |
+
example/ck_tile/03_gemm/script/run_full_test.sh "CI_${params.COMPILER_VERSION}" "${env.BRANCH_NAME}" "${NODE_NAME}" gfx942 """
|
| 1167 |
+
}
|
| 1168 |
+
steps{
|
| 1169 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1170 |
+
cleanWs()
|
| 1171 |
+
}
|
| 1172 |
+
}
|
| 1173 |
+
}
|
| 1174 |
+
}
|
| 1175 |
+
stage("Run TILE_ENGINE_GEMM Tests")
|
| 1176 |
+
{
|
| 1177 |
+
parallel
|
| 1178 |
+
{
|
| 1179 |
+
stage("Run TILE_ENGINE_GEMM Tests on gfx90a")
|
| 1180 |
+
{
|
| 1181 |
+
when {
|
| 1182 |
+
beforeAgent true
|
| 1183 |
+
expression { params.RUN_TILE_ENGINE_GEMM_TESTS.toBoolean() }
|
| 1184 |
+
}
|
| 1185 |
+
agent{ label rocmnode("gfx90a") }
|
| 1186 |
+
environment{
|
| 1187 |
+
setup_args = "NO_CK_BUILD"
|
| 1188 |
+
execute_args = """ cmake -G Ninja -D CMAKE_PREFIX_PATH=/opt/rocm \
|
| 1189 |
+
-D CMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1190 |
+
-D CMAKE_BUILD_TYPE=Release \
|
| 1191 |
+
-D GPU_TARGETS="gfx90a" \
|
| 1192 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && \
|
| 1193 |
+
ninja -j64 benchmark_gemm && \
|
| 1194 |
+
./bin/benchmark_gemm """
|
| 1195 |
+
}
|
| 1196 |
+
steps{
|
| 1197 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1198 |
+
cleanWs()
|
| 1199 |
+
}
|
| 1200 |
+
}
|
| 1201 |
+
stage("Run TILE_ENGINE_GEMM Tests on gfx942")
|
| 1202 |
+
{
|
| 1203 |
+
when {
|
| 1204 |
+
beforeAgent true
|
| 1205 |
+
expression { params.RUN_TILE_ENGINE_GEMM_TESTS.toBoolean() }
|
| 1206 |
+
}
|
| 1207 |
+
agent{ label rocmnode("gfx942") }
|
| 1208 |
+
environment{
|
| 1209 |
+
setup_args = "NO_CK_BUILD"
|
| 1210 |
+
execute_args = """ cmake -G Ninja -D CMAKE_PREFIX_PATH=/opt/rocm \
|
| 1211 |
+
-D CMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1212 |
+
-D CMAKE_BUILD_TYPE=Release \
|
| 1213 |
+
-D GPU_TARGETS="gfx942" \
|
| 1214 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && \
|
| 1215 |
+
ninja -j128 benchmark_gemm && \
|
| 1216 |
+
./bin/benchmark_gemm """
|
| 1217 |
+
}
|
| 1218 |
+
steps{
|
| 1219 |
+
buildHipClangJobAndReboot(setup_args:setup_args, no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1220 |
+
cleanWs()
|
| 1221 |
+
}
|
| 1222 |
+
}
|
| 1223 |
+
}
|
| 1224 |
+
}
|
| 1225 |
+
|
| 1226 |
+
stage("Build CK and run Tests")
|
| 1227 |
+
{
|
| 1228 |
+
parallel
|
| 1229 |
+
{
|
| 1230 |
+
stage("Build CK with RHEL8")
|
| 1231 |
+
{
|
| 1232 |
+
when {
|
| 1233 |
+
beforeAgent true
|
| 1234 |
+
expression { params.BUILD_LEGACY_OS.toBoolean() }
|
| 1235 |
+
}
|
| 1236 |
+
agent{ label rocmnode("gfx90a") }
|
| 1237 |
+
environment{
|
| 1238 |
+
def docker_name = "${env.CK_DOCKERHUB_PRIVATE}:ck_rhel8_rocm6.3"
|
| 1239 |
+
setup_args = """ -DGPU_TARGETS="gfx942" \
|
| 1240 |
+
-DCMAKE_CXX_FLAGS=" -O3 " \
|
| 1241 |
+
-DCK_USE_ALTERNATIVE_PYTHON=/opt/Python-3.8.13/bin/python3.8 """
|
| 1242 |
+
execute_args = " "
|
| 1243 |
+
}
|
| 1244 |
+
steps{
|
| 1245 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: " ", no_reboot:true, build_type: 'Release', docker_name: docker_name)
|
| 1246 |
+
cleanWs()
|
| 1247 |
+
}
|
| 1248 |
+
}
|
| 1249 |
+
stage("Build CK with SLES15")
|
| 1250 |
+
{
|
| 1251 |
+
when {
|
| 1252 |
+
beforeAgent true
|
| 1253 |
+
expression { params.BUILD_LEGACY_OS.toBoolean() }
|
| 1254 |
+
}
|
| 1255 |
+
agent{ label rocmnode("gfx90a") }
|
| 1256 |
+
environment{
|
| 1257 |
+
def docker_name = "${env.CK_DOCKERHUB_PRIVATE}:ck_sles15_rocm6.3"
|
| 1258 |
+
setup_args = """ -DGPU_TARGETS="gfx942" \
|
| 1259 |
+
-DCMAKE_CXX_FLAGS=" -O3 " \
|
| 1260 |
+
-DCK_USE_ALTERNATIVE_PYTHON=/opt/Python-3.8.13/bin/python3.8 """
|
| 1261 |
+
execute_args = " "
|
| 1262 |
+
}
|
| 1263 |
+
steps{
|
| 1264 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: " ", no_reboot:true, build_type: 'Release', docker_name: docker_name)
|
| 1265 |
+
cleanWs()
|
| 1266 |
+
}
|
| 1267 |
+
}
|
| 1268 |
+
stage("Build CK and run Tests on gfx942")
|
| 1269 |
+
{
|
| 1270 |
+
when {
|
| 1271 |
+
beforeAgent true
|
| 1272 |
+
expression { params.RUN_FULL_QA.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1273 |
+
}
|
| 1274 |
+
agent{ label rocmnode("gfx942") }
|
| 1275 |
+
environment{
|
| 1276 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install \
|
| 1277 |
+
-DGPU_TARGETS="gfx942" \
|
| 1278 |
+
-DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1279 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1280 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1281 |
+
-DGPU_TARGETS="gfx942" \
|
| 1282 |
+
-DCMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1283 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1284 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1285 |
+
}
|
| 1286 |
+
steps{
|
| 1287 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1288 |
+
cleanWs()
|
| 1289 |
+
}
|
| 1290 |
+
}
|
| 1291 |
+
stage("Build CK and run Tests on gfx950")
|
| 1292 |
+
{
|
| 1293 |
+
when {
|
| 1294 |
+
beforeAgent true
|
| 1295 |
+
expression { params.BUILD_GFX950.toBoolean() && !params.BUILD_INSTANCES_ONLY.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1296 |
+
}
|
| 1297 |
+
agent{ label rocmnode("gfx950") }
|
| 1298 |
+
environment{
|
| 1299 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install \
|
| 1300 |
+
-DGPU_TARGETS="gfx950" \
|
| 1301 |
+
-DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1302 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1303 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1304 |
+
-DGPU_TARGETS="gfx950" \
|
| 1305 |
+
-DCMAKE_CXX_COMPILER=/llvm-project/build/bin/clang++ \
|
| 1306 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1307 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1308 |
+
}
|
| 1309 |
+
steps{
|
| 1310 |
+
Build_CK_and_Reboot(setup_args: setup_args, docker_name: "rocm/composable_kernel-private:ck_ub22.04_rocm7.0", config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1311 |
+
cleanWs()
|
| 1312 |
+
}
|
| 1313 |
+
}
|
| 1314 |
+
stage("Build CK and run Tests on gfx908")
|
| 1315 |
+
{
|
| 1316 |
+
when {
|
| 1317 |
+
beforeAgent true
|
| 1318 |
+
expression { params.BUILD_GFX908.toBoolean() && !params.RUN_FULL_QA.toBoolean() && !params.BUILD_INSTANCES_ONLY.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1319 |
+
}
|
| 1320 |
+
agent{ label rocmnode("gfx908") }
|
| 1321 |
+
environment{
|
| 1322 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx908" -DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1323 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1324 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1325 |
+
-DGPU_TARGETS="gfx908" \
|
| 1326 |
+
-DCMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1327 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1328 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1329 |
+
}
|
| 1330 |
+
steps{
|
| 1331 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1332 |
+
cleanWs()
|
| 1333 |
+
}
|
| 1334 |
+
}
|
| 1335 |
+
stage("Build CK and run Tests on gfx90a")
|
| 1336 |
+
{
|
| 1337 |
+
when {
|
| 1338 |
+
beforeAgent true
|
| 1339 |
+
expression { !params.RUN_FULL_QA.toBoolean() && !params.BUILD_INSTANCES_ONLY.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1340 |
+
}
|
| 1341 |
+
agent{ label rocmnode("gfx90a") }
|
| 1342 |
+
environment{
|
| 1343 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx90a" -DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1344 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1345 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1346 |
+
-DGPU_TARGETS="gfx90a" \
|
| 1347 |
+
-DCMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1348 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1349 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1350 |
+
}
|
| 1351 |
+
steps{
|
| 1352 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1353 |
+
cleanWs()
|
| 1354 |
+
}
|
| 1355 |
+
}
|
| 1356 |
+
stage("Build CK instances for all supported targets")
|
| 1357 |
+
{
|
| 1358 |
+
when {
|
| 1359 |
+
beforeAgent true
|
| 1360 |
+
expression { params.BUILD_INSTANCES_ONLY.toBoolean() && !params.RUN_FULL_QA.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1361 |
+
}
|
| 1362 |
+
agent{ label rocmnode("gfx942") }
|
| 1363 |
+
environment{
|
| 1364 |
+
execute_args = """ cmake -G Ninja -D CMAKE_PREFIX_PATH=/opt/rocm \
|
| 1365 |
+
-D CMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1366 |
+
-D CMAKE_BUILD_TYPE=Release \
|
| 1367 |
+
-D CMAKE_CXX_FLAGS=" -O3 -ftime-trace" .. && ninja -j64 """
|
| 1368 |
+
}
|
| 1369 |
+
steps{
|
| 1370 |
+
buildHipClangJobAndReboot(setup_cmd: "", build_cmd: "", no_reboot:true, build_type: 'Release', execute_cmd: execute_args)
|
| 1371 |
+
cleanWs()
|
| 1372 |
+
}
|
| 1373 |
+
}
|
| 1374 |
+
stage("Build CK and run Tests on gfx1030")
|
| 1375 |
+
{
|
| 1376 |
+
when {
|
| 1377 |
+
beforeAgent true
|
| 1378 |
+
expression { !params.RUN_FULL_QA.toBoolean() && !params.BUILD_INSTANCES_ONLY.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1379 |
+
}
|
| 1380 |
+
agent{ label rocmnode("gfx1030") }
|
| 1381 |
+
environment{
|
| 1382 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx10-3-generic" -DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1383 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1384 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1385 |
+
-DGPU_TARGETS="gfx10-3-generic" \
|
| 1386 |
+
-DCMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1387 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1388 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1389 |
+
}
|
| 1390 |
+
steps{
|
| 1391 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1392 |
+
cleanWs()
|
| 1393 |
+
}
|
| 1394 |
+
}
|
| 1395 |
+
stage("Build CK and run Tests on gfx1101")
|
| 1396 |
+
{
|
| 1397 |
+
when {
|
| 1398 |
+
beforeAgent true
|
| 1399 |
+
expression { !params.RUN_FULL_QA.toBoolean() && !params.BUILD_INSTANCES_ONLY.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1400 |
+
}
|
| 1401 |
+
agent{ label rocmnode("gfx1101") }
|
| 1402 |
+
environment{
|
| 1403 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx11-generic" -DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1404 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1405 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1406 |
+
-DGPU_TARGETS="gfx11-generic" \
|
| 1407 |
+
-DCMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1408 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1409 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1410 |
+
}
|
| 1411 |
+
steps{
|
| 1412 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1413 |
+
cleanWs()
|
| 1414 |
+
}
|
| 1415 |
+
}
|
| 1416 |
+
stage("Build CK and run Tests on gfx1201")
|
| 1417 |
+
{
|
| 1418 |
+
when {
|
| 1419 |
+
beforeAgent true
|
| 1420 |
+
expression { params.BUILD_GFX12.toBoolean() && !params.RUN_FULL_QA.toBoolean() && !params.BUILD_INSTANCES_ONLY.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1421 |
+
}
|
| 1422 |
+
agent{ label rocmnode("gfx1201") }
|
| 1423 |
+
environment{
|
| 1424 |
+
setup_args = """ -DCMAKE_INSTALL_PREFIX=../install -DGPU_TARGETS="gfx12-generic" -DCMAKE_CXX_FLAGS=" -O3 " """
|
| 1425 |
+
execute_args = """ cd ../client_example && rm -rf build && mkdir build && cd build && \
|
| 1426 |
+
cmake -DCMAKE_PREFIX_PATH="${env.WORKSPACE}/install;/opt/rocm" \
|
| 1427 |
+
-DGPU_TARGETS="gfx12-generic" \
|
| 1428 |
+
-DCMAKE_CXX_COMPILER="${build_compiler()}" \
|
| 1429 |
+
-DCMAKE_C_COMPILER=/opt/rocm/llvm/bin/clang \
|
| 1430 |
+
-DCMAKE_CXX_FLAGS=" -O3 " .. && make -j """
|
| 1431 |
+
}
|
| 1432 |
+
steps{
|
| 1433 |
+
Build_CK_and_Reboot(setup_args: setup_args, config_targets: "install", no_reboot:true, build_type: 'Release', execute_cmd: execute_args, prefixpath: '/usr/local')
|
| 1434 |
+
cleanWs()
|
| 1435 |
+
}
|
| 1436 |
+
}
|
| 1437 |
+
}
|
| 1438 |
+
}
|
| 1439 |
+
stage("Process Performance Test Results")
|
| 1440 |
+
{
|
| 1441 |
+
parallel
|
| 1442 |
+
{
|
| 1443 |
+
stage("Process results"){
|
| 1444 |
+
when {
|
| 1445 |
+
beforeAgent true
|
| 1446 |
+
expression { params.RUN_PERFORMANCE_TESTS.toBoolean() && !params.BUILD_LEGACY_OS.toBoolean() }
|
| 1447 |
+
}
|
| 1448 |
+
agent { label 'mici' }
|
| 1449 |
+
steps{
|
| 1450 |
+
process_results()
|
| 1451 |
+
cleanWs()
|
| 1452 |
+
}
|
| 1453 |
+
}
|
| 1454 |
+
}
|
| 1455 |
+
}
|
| 1456 |
+
}
|
| 1457 |
+
}
|
Code/Baselines/flash-attention/csrc/composable_kernel/LICENSE
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (c) 2018- , Advanced Micro Devices, Inc. (Chao Liu, Jing Zhang)
|
| 2 |
+
Copyright (c) 2019- , Advanced Micro Devices, Inc. (Letao Qin, Qianfeng Zhang, Liang Huang, Shaojie Wang)
|
| 3 |
+
Copyright (c) 2022- , Advanced Micro Devices, Inc. (Anthony Chang, Chunyu Lai, Illia Silin, Adam Osewski, Poyen Chen, Jehandad Khan)
|
| 4 |
+
Copyright (c) 2019-2021, Advanced Micro Devices, Inc. (Hanwen Chang)
|
| 5 |
+
Copyright (c) 2019-2020, Advanced Micro Devices, Inc. (Tejash Shah)
|
| 6 |
+
Copyright (c) 2020 , Advanced Micro Devices, Inc. (Xiaoyan Zhou)
|
| 7 |
+
Copyright (c) 2021-2022, Advanced Micro Devices, Inc. (Jianfeng Yan)
|
| 8 |
+
|
| 9 |
+
SPDX-License-Identifier: MIT
|
| 10 |
+
Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 11 |
+
|
| 12 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 13 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 14 |
+
in the Software without restriction, including without limitation the rights
|
| 15 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 16 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 17 |
+
furnished to do so, subject to the following conditions:
|
| 18 |
+
|
| 19 |
+
The above copyright notice and this permission notice shall be included in all
|
| 20 |
+
copies or substantial portions of the Software.
|
| 21 |
+
|
| 22 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 23 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 24 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 25 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 26 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 27 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 28 |
+
SOFTWARE.
|
Code/Baselines/flash-attention/csrc/composable_kernel/README.md
ADDED
|
@@ -0,0 +1,216 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Composable Kernel
|
| 2 |
+
|
| 3 |
+
> [!NOTE]
|
| 4 |
+
> The published documentation is available at [Composable Kernel](https://rocm.docs.amd.com/projects/composable_kernel/en/latest/) in an organized, easy-to-read format, with search and a table of contents. The documentation source files reside in the `docs` folder of this repository. As with all ROCm projects, the documentation is open source. For more information on contributing to the documentation, see [Contribute to ROCm documentation](https://rocm.docs.amd.com/en/latest/contribute/contributing.html).
|
| 5 |
+
|
| 6 |
+
The Composable Kernel (CK) library provides a programming model for writing performance-critical
|
| 7 |
+
kernels for machine learning workloads across multiple architectures (GPUs, CPUs, etc.). The CK library
|
| 8 |
+
uses general purpose kernel languages, such as HIP C++.
|
| 9 |
+
|
| 10 |
+
CK uses two concepts to achieve performance portability and code maintainability:
|
| 11 |
+
|
| 12 |
+
* A tile-based programming model
|
| 13 |
+
* Algorithm complexity reduction for complex machine learning (ML) operators. This uses an innovative
|
| 14 |
+
technique called *Tensor Coordinate Transformation*.
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
|
| 18 |
+
The current CK library is structured into four layers:
|
| 19 |
+
|
| 20 |
+
* Templated Tile Operators
|
| 21 |
+
* Templated Kernel and Invoker
|
| 22 |
+
* Instantiated Kernel and Invoker
|
| 23 |
+
* Client API
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
## General information
|
| 28 |
+
|
| 29 |
+
* [CK supported operations](include/ck/README.md)
|
| 30 |
+
* [CK Tile supported operations](include/ck_tile/README.md)
|
| 31 |
+
* [CK wrapper](client_example/25_wrapper/README.md)
|
| 32 |
+
* [CK codegen](codegen/README.md)
|
| 33 |
+
* [CK profiler](profiler/README.md)
|
| 34 |
+
* [Examples (Custom use of CK supported operations)](example/README.md)
|
| 35 |
+
* [Client examples (Use of CK supported operations with instance factory)](client_example/README.md)
|
| 36 |
+
* [Terminology](/TERMINOLOGY.md)
|
| 37 |
+
* [Contributors](/CONTRIBUTORS.md)
|
| 38 |
+
|
| 39 |
+
CK is released under the **[MIT license](/LICENSE)**.
|
| 40 |
+
|
| 41 |
+
## Building CK
|
| 42 |
+
|
| 43 |
+
We recommend building CK inside Docker containers, which include all necessary packages. Pre-built
|
| 44 |
+
Docker images are available on [DockerHub](https://hub.docker.com/r/rocm/composable_kernel/tags).
|
| 45 |
+
|
| 46 |
+
1. To build a new Docker image, use the Dockerfile provided with the source code:
|
| 47 |
+
|
| 48 |
+
```bash
|
| 49 |
+
DOCKER_BUILDKIT=1 docker build -t ck:latest -f Dockerfile .
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
2. Launch the Docker container:
|
| 53 |
+
|
| 54 |
+
```bash
|
| 55 |
+
docker run \
|
| 56 |
+
-it \
|
| 57 |
+
--privileged \
|
| 58 |
+
--group-add sudo \
|
| 59 |
+
-w /root/workspace \
|
| 60 |
+
-v ${PATH_TO_LOCAL_WORKSPACE}:/root/workspace \
|
| 61 |
+
ck:latest \
|
| 62 |
+
/bin/bash
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
3. Clone CK source code from the GitHub repository and start the build:
|
| 66 |
+
|
| 67 |
+
```bash
|
| 68 |
+
git clone https://github.com/ROCm/composable_kernel.git && \
|
| 69 |
+
cd composable_kernel && \
|
| 70 |
+
mkdir build && \
|
| 71 |
+
cd build
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
You must set the `GPU_TARGETS` macro to specify the GPU target architecture(s) you want
|
| 75 |
+
to run CK on. You can specify single or multiple architectures. If you specify multiple architectures,
|
| 76 |
+
use a semicolon between each; for example, `gfx908;gfx90a;gfx942`.
|
| 77 |
+
|
| 78 |
+
```bash
|
| 79 |
+
cmake \
|
| 80 |
+
-D CMAKE_PREFIX_PATH=/opt/rocm \
|
| 81 |
+
-D CMAKE_CXX_COMPILER=/opt/rocm/bin/hipcc \
|
| 82 |
+
-D CMAKE_BUILD_TYPE=Release \
|
| 83 |
+
-D GPU_TARGETS="gfx908;gfx90a" \
|
| 84 |
+
..
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
If you don't set `GPU_TARGETS` on the cmake command line, CK is built for all GPU targets
|
| 88 |
+
supported by the current compiler (this may take a long time).
|
| 89 |
+
Tests and examples will only get built if the GPU_TARGETS is set by the user on the cmake command line.
|
| 90 |
+
|
| 91 |
+
NOTE: If you try setting `GPU_TARGETS` to a list of architectures, the build will only work if the
|
| 92 |
+
architectures are similar, e.g., `gfx908;gfx90a`, or `gfx1100;gfx1101;gfx11012`. Otherwise, if you
|
| 93 |
+
want to build the library for a list of different architectures,
|
| 94 |
+
you should use the `GPU_ARCHS` build argument, for example `GPU_ARCHS=gfx908;gfx1030;gfx1100;gfx942`.
|
| 95 |
+
|
| 96 |
+
4. Build the entire CK library:
|
| 97 |
+
|
| 98 |
+
```bash
|
| 99 |
+
make -j
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
5. Install CK:
|
| 103 |
+
|
| 104 |
+
```bash
|
| 105 |
+
make -j install
|
| 106 |
+
```
|
| 107 |
+
**[See Note on -j](#notes)**
|
| 108 |
+
|
| 109 |
+
## Optional post-install steps
|
| 110 |
+
|
| 111 |
+
* Build examples and tests:
|
| 112 |
+
|
| 113 |
+
```bash
|
| 114 |
+
make -j examples tests
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
* Build and run all examples and tests:
|
| 118 |
+
|
| 119 |
+
```bash
|
| 120 |
+
make -j check
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
You can find instructions for running each individual example in [example](/example).
|
| 124 |
+
|
| 125 |
+
* Build and run smoke/regression examples and tests:
|
| 126 |
+
|
| 127 |
+
```bash
|
| 128 |
+
make -j smoke # tests and examples that run for < 30 seconds each
|
| 129 |
+
```
|
| 130 |
+
```bash
|
| 131 |
+
make -j regression # tests and examples that run for >= 30 seconds each
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
* Build ckProfiler:
|
| 135 |
+
|
| 136 |
+
```bash
|
| 137 |
+
make -j ckProfiler
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
You can find instructions for running ckProfiler in [profiler](/profiler).
|
| 141 |
+
|
| 142 |
+
* Build our documentation locally:
|
| 143 |
+
|
| 144 |
+
``` bash
|
| 145 |
+
cd docs
|
| 146 |
+
pip3 install -r sphinx/requirements.txt
|
| 147 |
+
python3 -m sphinx -T -E -b html -d _build/doctrees -D language=en . _build/html
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
### Notes
|
| 151 |
+
The `-j` option for building with multiple threads in parallel, which speeds up the build significantly.
|
| 152 |
+
However, `-j` launches unlimited number of threads, which can cause the build to run out of memory and
|
| 153 |
+
crash. On average, you should expect each thread to use ~2Gb of RAM.
|
| 154 |
+
Depending on the number of CPU cores and the amount of RAM on your system, you may want to
|
| 155 |
+
limit the number of threads. For example, if you have a 128-core CPU and 128 Gb of RAM it's advisable to use `-j32`.
|
| 156 |
+
|
| 157 |
+
Additional cmake flags can be used to significantly speed-up the build:
|
| 158 |
+
|
| 159 |
+
* `DTYPES` (default is not set) can be set to any subset of "fp64;fp32;fp16;fp8;bf16;int8" to build
|
| 160 |
+
instances of select data types only. The main default data types are fp32 and fp16; you can safely skip
|
| 161 |
+
other data types.
|
| 162 |
+
|
| 163 |
+
* `DISABLE_DL_KERNELS` (default is OFF) must be set to ON in order not to build instances, such as `gemm_dl` or
|
| 164 |
+
`batched_gemm_multi_d_dl`. These instances are useful on architectures like the NAVI2x, as most
|
| 165 |
+
other platforms have faster instances, such as `xdl` or `wmma`, available.
|
| 166 |
+
|
| 167 |
+
* `DISABLE_DPP_KERNELS` (default is OFF) must be set to ON in order not to build instances, such as `gemm_dpp`.
|
| 168 |
+
These instances offer a slightly better performance of fp16 gemms on NAVI2x. But on other architectures faster alternatives are available.
|
| 169 |
+
|
| 170 |
+
* `CK_USE_FP8_ON_UNSUPPORTED_ARCH` (default is OFF) must be set to ON in order to build instances,
|
| 171 |
+
such as `gemm_universal`, `gemm_universal_streamk` and `gemm_multiply_multiply` for fp8 data type for GPU targets which do not have native support for fp8 data type, such as gfx908 or gfx90a. These instances are useful on
|
| 172 |
+
architectures like the MI100/MI200 for the functional support only.
|
| 173 |
+
|
| 174 |
+
## Using sccache for building
|
| 175 |
+
|
| 176 |
+
The default CK Docker images come with a pre-installed version of sccache, which supports clang
|
| 177 |
+
being used as hip-compiler (" -x hip"). Using sccache can help reduce the time to re-build code from
|
| 178 |
+
hours to 1-2 minutes. In order to invoke sccache, you need to run:
|
| 179 |
+
|
| 180 |
+
```bash
|
| 181 |
+
sccache --start-server
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
then add the following flags to the cmake command line:
|
| 185 |
+
|
| 186 |
+
```bash
|
| 187 |
+
-DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_C_COMPILER_LAUNCHER=sccache
|
| 188 |
+
```
|
| 189 |
+
|
| 190 |
+
You may need to clean up the build folder and repeat the cmake and make steps in order to take
|
| 191 |
+
advantage of the sccache during subsequent builds.
|
| 192 |
+
|
| 193 |
+
## Using CK as pre-built kernel library
|
| 194 |
+
|
| 195 |
+
You can find instructions for using CK as a pre-built kernel library in [client_example](/client_example).
|
| 196 |
+
|
| 197 |
+
## Contributing to CK
|
| 198 |
+
|
| 199 |
+
When you contribute to CK, make sure you run `clang-format` on all changed files. We highly
|
| 200 |
+
recommend using git hooks that are managed by the `pre-commit` framework. To install hooks, run:
|
| 201 |
+
|
| 202 |
+
```bash
|
| 203 |
+
sudo script/install_precommit.sh
|
| 204 |
+
```
|
| 205 |
+
|
| 206 |
+
With this approach, `pre-commit` adds the appropriate hooks to your local repository and
|
| 207 |
+
automatically runs `clang-format` (and possibly additional checks) before any commit is created.
|
| 208 |
+
|
| 209 |
+
If you need to uninstall hooks from the repository, you can do so by running the following command:
|
| 210 |
+
|
| 211 |
+
```bash
|
| 212 |
+
script/uninstall_precommit.sh
|
| 213 |
+
```
|
| 214 |
+
|
| 215 |
+
If you need to temporarily disable pre-commit hooks, you can add the `--no-verify` option to the
|
| 216 |
+
`git commit` command.
|
Code/Baselines/flash-attention/csrc/composable_kernel/TERMINOLOGY.md
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[Back to the main page](./README.md)
|
| 2 |
+
# Composable Kernel terminology
|
Code/Baselines/flash-attention/csrc/composable_kernel/dev-requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ROCm/rocm-recipes
|
| 2 |
+
ROCm/rocm-cmake@04f694df2a8dc9d7e35fa4dee4ba5fa407ec04f8 --build
|
| 3 |
+
danmar/cppcheck@2.9
|
Code/Baselines/flash-attention/csrc/composable_kernel/pyproject.toml
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["setuptools", "setuptools-scm"]
|
| 3 |
+
build-backend = "setuptools.build_meta"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "rocm-composable-kernel"
|
| 7 |
+
dynamic = ["version"]
|
| 8 |
+
description = "Composable Kernel, performance-critical kernels for machine learning workloads"
|
| 9 |
+
readme = "README.md"
|
| 10 |
+
requires-python = ">=3.8"
|
| 11 |
+
license = {file = "LICENSE"}
|
| 12 |
+
classifiers = [
|
| 13 |
+
"Programming Language :: Python :: 3",
|
| 14 |
+
"License :: OSI Approved :: MIT License",
|
| 15 |
+
"Operating System :: OS Independent",
|
| 16 |
+
]
|
| 17 |
+
dependencies = []
|
| 18 |
+
|
| 19 |
+
[project.urls]
|
| 20 |
+
"Homepage" = "https://github.com/rocm/composable_kernel"
|
| 21 |
+
"Bug Tracker" = "https://github.com/rocm/composable_kernel/issues"
|
| 22 |
+
|
| 23 |
+
[tool.setuptools]
|
| 24 |
+
packages = ["ck4inductor", "ck4inductor.include", "ck4inductor.library", "ck4inductor.universal_gemm", "ck4inductor.batched_universal_gemm", "ck4inductor.grouped_conv_fwd"]
|
| 25 |
+
|
| 26 |
+
[tool.setuptools.package-dir]
|
| 27 |
+
ck4inductor = "python/ck4inductor"
|
| 28 |
+
"ck4inductor.universal_gemm" = "python/ck4inductor/universal_gemm"
|
| 29 |
+
"ck4inductor.batched_universal_gemm" = "python/ck4inductor/batched_universal_gemm"
|
| 30 |
+
"ck4inductor.grouped_conv_fwd" = "python/ck4inductor/grouped_conv_fwd"
|
| 31 |
+
"ck4inductor.include" = "include"
|
| 32 |
+
"ck4inductor.library" = "library"
|
| 33 |
+
|
| 34 |
+
[tool.setuptools.package-data]
|
| 35 |
+
"ck4inductor.include" = ["ck/**/*.hpp"]
|
| 36 |
+
"ck4inductor.library" = ["src/tensor_operation_instance/gpu/gemm_universal/**/*.hpp", "src/tensor_operation_instance/gpu/gemm_universal_batched/**/*.hpp", "include/ck/library/tensor_operation_instance/gpu/grouped_conv_fwd/**/*.hpp"]
|
| 37 |
+
|
| 38 |
+
[tool.setuptools.dynamic]
|
| 39 |
+
version = { attr = "setuptools_scm.get_version" }
|
Code/Baselines/flash-attention/csrc/composable_kernel/rbuild.ini
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[develop]
|
| 2 |
+
cxx = ${rocm_path}/bin/hipcc
|
| 3 |
+
cc = ${rocm_path}/llvm/bin/clang
|
| 4 |
+
ignore = pcre
|
| 5 |
+
deps =
|
| 6 |
+
-f dev-requirements.txt
|
| 7 |
+
define =
|
| 8 |
+
BUILD_DEV=On
|
Code/Baselines/flash-attention/csrc/composable_kernel/requirements.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
Code/Baselines/flash-attention/csrc/cutlass/cmake/CTestTestfile.test.configure.cmake
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2017 - 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
#
|
| 4 |
+
# Redistribution and use in source and binary forms, with or without
|
| 5 |
+
# modification, are permitted provided that the following conditions are met:
|
| 6 |
+
#
|
| 7 |
+
# 1. Redistributions of source code must retain the above copyright notice, this
|
| 8 |
+
# list of conditions and the following disclaimer.
|
| 9 |
+
#
|
| 10 |
+
# 2. Redistributions in binary form must reproduce the above copyright notice,
|
| 11 |
+
# this list of conditions and the following disclaimer in the documentation
|
| 12 |
+
# and/or other materials provided with the distribution.
|
| 13 |
+
#
|
| 14 |
+
# 3. Neither the name of the copyright holder nor the names of its
|
| 15 |
+
# contributors may be used to endorse or promote products derived from
|
| 16 |
+
# this software without specific prior written permission.
|
| 17 |
+
#
|
| 18 |
+
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 19 |
+
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 20 |
+
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 21 |
+
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 22 |
+
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 23 |
+
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 24 |
+
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 25 |
+
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 26 |
+
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 27 |
+
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 28 |
+
|
| 29 |
+
if (CUTLASS_USE_EXTENDED_ADD_TEST_FORMAT)
|
| 30 |
+
# The longform/extended format allows generator expressions to be
|
| 31 |
+
# expanded property and is useful in contexts where the files need
|
| 32 |
+
# to be immediately included into being-processed cmake code.
|
| 33 |
+
add_test(NAME @TESTCASE_NAME@ COMMAND ${_CUTLASS_TEST_EXECUTION_ENVIRONMENT} "${TEST_EXE_PATH}" @TEST_COMMAND_OPTIONS@)
|
| 34 |
+
else()
|
| 35 |
+
add_test(@TESTCASE_NAME@ ${_CUTLASS_TEST_EXECUTION_ENVIRONMENT} "${TEST_EXE_PATH}" @TEST_COMMAND_OPTIONS@)
|
| 36 |
+
endif()
|
| 37 |
+
|
| 38 |
+
if (TEST_EXE_WORKING_DIRECTORY)
|
| 39 |
+
set_tests_properties(@TESTCASE_NAME@ PROPERTIES WORKING_DIRECTORY "${TEST_EXE_WORKING_DIRECTORY}")
|
| 40 |
+
endif()
|
| 41 |
+
|
| 42 |
+
set_tests_properties(@TESTCASE_NAME@ PROPERTIES DISABLED @__DISABLE_TESTS@)
|
| 43 |
+
|
Code/Baselines/flash-attention/csrc/flash_attn/flash_api.cpp
ADDED
|
@@ -0,0 +1,1485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/******************************************************************************
|
| 2 |
+
* Copyright (c) 2024, Tri Dao.
|
| 3 |
+
******************************************************************************/
|
| 4 |
+
|
| 5 |
+
// Include these 2 headers instead of torch/extension.h since we don't need all of the torch headers.
|
| 6 |
+
#include <torch/python.h>
|
| 7 |
+
#include <torch/nn/functional.h>
|
| 8 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 9 |
+
#include <c10/cuda/CUDAStream.h>
|
| 10 |
+
#include <ATen/cuda/CUDAGeneratorImpl.h> // For at::Generator and at::PhiloxCudaState
|
| 11 |
+
#include "philox_unpack.cuh" // For at::cuda::philox::unpack
|
| 12 |
+
|
| 13 |
+
#include <cutlass/numeric_types.h>
|
| 14 |
+
|
| 15 |
+
#include "namespace_config.h"
|
| 16 |
+
#include "hardware_info.h"
|
| 17 |
+
#include "flash.h"
|
| 18 |
+
#include "static_switch.h"
|
| 19 |
+
|
| 20 |
+
#define CHECK_DEVICE(x) TORCH_CHECK(x.is_cuda(), #x " must be on CUDA")
|
| 21 |
+
#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
|
| 22 |
+
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
|
| 23 |
+
|
| 24 |
+
namespace FLASH_NAMESPACE {
|
| 25 |
+
|
| 26 |
+
// Populate a Flash_fwd_params struct for a forward kernel launch.
// All sizes are in elements, not bytes. q/k/v/out are the device tensors whose
// data pointers and strides are recorded; the tensors themselves are not
// retained, so callers must keep them alive across the launch.
// cu_seqlens_{q,k}_d are null for fixed-length (padded) batches, or point to
// cumulative sequence-length arrays for the varlen path. p_d, when non-null,
// receives the softmax probabilities (return_softmax debugging path).
void set_params_fprop(Flash_fwd_params &params,
                      // sizes
                      const size_t b,
                      const size_t seqlen_q,
                      const size_t seqlen_k,
                      const size_t seqlen_q_rounded,
                      const size_t seqlen_k_rounded,
                      const size_t h,
                      const size_t h_k,
                      const size_t d,
                      const size_t d_rounded,
                      // device pointers
                      const at::Tensor q,
                      const at::Tensor k,
                      const at::Tensor v,
                      at::Tensor out,
                      void *cu_seqlens_q_d,
                      void *cu_seqlens_k_d,
                      void *seqused_k,
                      void *p_d,
                      void *softmax_lse_d,
                      float p_dropout,
                      float softmax_scale,
                      int window_size_left,
                      int window_size_right,
                      const float softcap,
                      bool seqlenq_ngroups_swapped=false,
                      const bool unpadded_lse=false) {

    // Reset the parameters
    params = {};

    params.is_bf16 = q.dtype() == torch::kBFloat16;

    // Set the pointers and strides.
    params.q_ptr = q.data_ptr();
    params.k_ptr = k.data_ptr();
    params.v_ptr = v.data_ptr();
    // All stride are in elements, not bytes.
    params.q_row_stride = q.stride(-3);
    params.k_row_stride = k.stride(-3);
    params.v_row_stride = v.stride(-3);
    params.q_head_stride = q.stride(-2);
    params.k_head_stride = k.stride(-2);
    params.v_head_stride = v.stride(-2);
    params.o_ptr = out.data_ptr();
    params.o_row_stride = out.stride(-3);
    params.o_head_stride = out.stride(-2);

    // Batch strides are only meaningful for the fixed-length layout; in the
    // varlen case (cu_seqlens_q_d != nullptr) sequences are packed and there
    // is no batch dimension to stride over.
    if (cu_seqlens_q_d == nullptr) {
        params.q_batch_stride = q.stride(0);
        params.k_batch_stride = k.stride(0);
        params.v_batch_stride = v.stride(0);
        params.o_batch_stride = out.stride(0);
        // When mha_fwd collapsed (1, h) -> (ngroups, h_k), stride(0) was taken
        // on the reshaped view, so scale it back up by the swapped seqlen_q.
        if (seqlenq_ngroups_swapped) {
             params.q_batch_stride *= seqlen_q;
             params.o_batch_stride *= seqlen_q;
        }
    }

    params.cu_seqlens_q = static_cast<int *>(cu_seqlens_q_d);
    params.cu_seqlens_k = static_cast<int *>(cu_seqlens_k_d);
    params.seqused_k = static_cast<int *>(seqused_k);

    // P = softmax(QK^T)
    params.p_ptr = p_d;

    // Softmax sum
    params.softmax_lse_ptr = softmax_lse_d;

    // Set the dimensions.
    params.b = b;
    params.h = h;
    params.h_k = h_k;
    params.h_h_k_ratio = h / h_k;     // GQA/MQA group size (h is a multiple of h_k)
    params.seqlen_q = seqlen_q;
    params.seqlen_k = seqlen_k;
    params.seqlen_q_rounded = seqlen_q_rounded;
    params.seqlen_k_rounded = seqlen_k_rounded;
    params.d = d;
    params.d_rounded = d_rounded;

    // Set the different scale values.
#ifdef FLASHATTENTION_DISABLE_SOFTCAP
    TORCH_CHECK(softcap <= 0.0, "This flash attention build does not support softcap.");
#endif
    if (softcap > 0.0) {
        // NOTE(review): with softcapping active, the qk scale is folded into
        // params.softcap (softmax_scale / softcap) while scale_softmax carries
        // the cap value — presumably so the kernel can apply
        // cap * tanh(scale/cap * qk); confirm against the kernel side.
        params.softcap = softmax_scale / softcap;
        params.scale_softmax = softcap;
        params.scale_softmax_log2 = softcap * M_LOG2E;
    } else{
        // Remove potential NaN
        params.softcap = 0.0;
        params.scale_softmax = softmax_scale;
        params.scale_softmax_log2 = softmax_scale * M_LOG2E;
    }

    // Set this to probability of keeping an element to simplify things.
    params.p_dropout = 1.f - p_dropout;
    // Convert p from float to int so we don't have to convert the random uint to float to compare.
    // [Minor] We want to round down since when we do the comparison we use <= instead of <
    // params.p_dropout_in_uint = uint32_t(std::floor(params.p_dropout * 4294967295.0));
    // params.p_dropout_in_uint16_t = uint16_t(std::floor(params.p_dropout * 65535.0));
    params.p_dropout_in_uint8_t = uint8_t(std::floor(params.p_dropout * 255.0));
    params.rp_dropout = 1.f / params.p_dropout;
    params.scale_softmax_rp_dropout = params.rp_dropout * params.scale_softmax;
    TORCH_CHECK(p_dropout < 1.f);
#ifdef FLASHATTENTION_DISABLE_DROPOUT
    TORCH_CHECK(p_dropout == 0.0f, "This flash attention build does not support dropout.");
#endif

    // Causal is the special case where window_size_right == 0 and window_size_left < 0.
    // Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
    params.is_causal = window_size_left < 0 && window_size_right == 0;

    // A negative window size means "unbounded" on that side; when only one
    // side is bounded, normalize the other to seqlen_k.
    if (window_size_left < 0 && window_size_right >= 0) { window_size_left = seqlen_k; }
    if (window_size_left >= 0 && window_size_right < 0) { window_size_right = seqlen_k; }
    params.window_size_left = window_size_left;
    params.window_size_right = window_size_right;

#ifdef FLASHATTENTION_DISABLE_LOCAL
    TORCH_CHECK(params.is_causal || (window_size_left < 0 && window_size_right < 0),
        "This flash attention build does not support local attention.");
#endif

    params.is_seqlens_k_cumulative = true;

#ifdef FLASHATTENTION_DISABLE_UNEVEN_K
    TORCH_CHECK(d == d_rounded, "This flash attention build does not support headdim not being a multiple of 32.");
#endif

    params.unpadded_lse = unpadded_lse;
    params.seqlenq_ngroups_swapped = seqlenq_ngroups_swapped;
}
|
| 160 |
+
|
| 161 |
+
// Populate a Flash_bwd_params struct for a backward kernel launch.
// First fills the fields shared with the forward pass via set_params_fprop
// (with seqused_k and p_d forced to null and seqlenq_ngroups_swapped=false),
// then records the gradient tensors (dout incoming; dq/dk/dv outgoing), the
// fp32 accumulation buffers, and the dsoftmax sum workspace.
void set_params_dgrad(Flash_bwd_params &params,
                      // sizes
                      const size_t b,
                      const size_t seqlen_q,
                      const size_t seqlen_k,
                      const size_t seqlen_q_rounded,
                      const size_t seqlen_k_rounded,
                      const size_t h,
                      const size_t h_k,
                      const size_t d,
                      const size_t d_rounded,
                      // device pointers
                      const at::Tensor q,
                      const at::Tensor k,
                      const at::Tensor v,
                      const at::Tensor out,
                      const at::Tensor dout,
                      at::Tensor dq,
                      at::Tensor dk,
                      at::Tensor dv,
                      void *cu_seqlens_q_d,
                      void *cu_seqlens_k_d,
                      void *dq_accum_d,
                      void *dk_accum_d,
                      void *dv_accum_d,
                      void *softmax_lse_d,
                      void *dsoftmax_sum_d,
                      float p_dropout,
                      float softmax_scale,
                      int window_size_left,
                      int window_size_right,
                      const float softcap,
                      bool deterministic,
                      const bool unpadded_lse) {

    set_params_fprop(params,
                     b, seqlen_q, seqlen_k, seqlen_q_rounded, seqlen_k_rounded, h, h_k, d, d_rounded,
                     q, k, v, out,
                     cu_seqlens_q_d,
                     cu_seqlens_k_d,
                     nullptr,   // seqused_k: not used by the backward pass
                     nullptr,   // p_d: softmax probs not returned here
                     softmax_lse_d,
                     p_dropout,
                     softmax_scale,
                     window_size_left,
                     window_size_right,
                     softcap,
                     false, // seqlenq_ngroups_swapped
                     unpadded_lse);

    // Set the pointers and strides.
    params.do_ptr = dout.data_ptr();
    params.do_row_stride = dout.stride(-3);
    params.do_head_stride = dout.stride(-2);
    params.dq_ptr = dq.data_ptr();
    params.dk_ptr = dk.data_ptr();
    params.dv_ptr = dv.data_ptr();
    params.dq_row_stride = dq.stride(-3);
    params.dk_row_stride = dk.stride(-3);
    params.dv_row_stride = dv.stride(-3);
    params.dq_head_stride = dq.stride(-2);
    params.dk_head_stride = dk.stride(-2);
    params.dv_head_stride = dv.stride(-2);

    // Batch strides only exist for the fixed-length (non-varlen) layout.
    if (cu_seqlens_q_d == nullptr) {
        params.do_batch_stride = dout.stride(0);
        params.dq_batch_stride = dq.stride(0);
        params.dk_batch_stride = dk.stride(0);
        params.dv_batch_stride = dv.stride(0);
    }

    // fp32 accumulation workspaces (may be null depending on the caller).
    params.dq_accum_ptr = dq_accum_d;
    params.dk_accum_ptr = dk_accum_d;
    params.dv_accum_ptr = dv_accum_d;

    // Softmax sum
    params.dsoftmax_sum = dsoftmax_sum_d;

    params.deterministic = deterministic;
}
|
| 242 |
+
|
| 243 |
+
// Dispatch the forward kernel. The nested switch macros expand the runtime
// dtype (fp16/bf16), head dimension, and causality flag into compile-time
// template parameters of the pre-instantiated kernels. The split-KV variant
// is chosen when num_splits > 1 or when the caller forces it.
void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream, bool force_split_kernel=false) {
    FP16_SWITCH(!params.is_bf16, [&] {
        HEADDIM_SWITCH(params.d, [&] {
            BOOL_SWITCH(params.is_causal, Is_causal, [&] {
                if (params.num_splits <= 1 && !force_split_kernel) {  // If we don't set it num_splits == 0
                    run_mha_fwd_<elem_type, kHeadDim, Is_causal>(params, stream);
                } else {
                    run_mha_fwd_splitkv_dispatch<elem_type, kHeadDim, Is_causal>(params, stream);
                }
            });
        });
    });
}
|
| 256 |
+
|
| 257 |
+
// Pick the number of KV splits that best fills the GPU. For example, with
// batch * n_heads = 48 on 108 SMs, 2 splits (efficiency = 0.89) beats
// 3 splits (efficiency = 0.67); too many splits also costs extra HBM
// reads/writes. We compute the occupancy efficiency of every eligible split
// count, then return the smallest count reaching 85% of the best.
inline int num_splits_heuristic(int batch_nheads_mblocks, int num_SMs, int num_n_blocks, int max_splits) {
    // If the SMs are already almost full, a single split is optimal.
    if (batch_nheads_mblocks >= 0.8f * num_SMs) { return 1; }
    max_splits = std::min({max_splits, num_SMs, num_n_blocks});
    auto div_up = [](int a, int b) { return (a + b - 1) / b; };
    // A split count is redundant when it yields the same blocks-per-split as
    // one fewer split (e.g. 64 blocks: 12 splits would give 6 * 11 + (-2)
    // blocks, i.e. it degenerates to 11 splits).
    auto eligible = [&div_up, &num_n_blocks](int s) {
        return s == 1 || div_up(num_n_blocks, s) != div_up(num_n_blocks, s - 1);
    };
    std::vector<float> eff_table;
    eff_table.reserve(max_splits);
    float best_eff = 0.f;
    for (int s = 1; s <= max_splits; ++s) {
        if (!eligible(s)) {
            eff_table.push_back(0.f);
            continue;
        }
        float waves = float(batch_nheads_mblocks * s) / num_SMs;
        float eff = waves / std::ceil(waves);  // fraction of the last wave that is full
        best_eff = eff > best_eff ? eff : best_eff;
        eff_table.push_back(eff);
    }
    // Smallest eligible split count within 85% of the best efficiency.
    for (int s = 1; s <= max_splits; ++s) {
        if (eligible(s) && eff_table[s - 1] >= 0.85 * best_eff) {
            return s;
        }
    }
    return 1;
}
|
| 298 |
+
|
| 299 |
+
std::tuple<at::Tensor, at::Tensor> set_params_splitkv(Flash_fwd_params ¶ms, const int batch_size,
|
| 300 |
+
const int num_heads, const int head_size, const int max_seqlen_k, const int max_seqlen_q,
|
| 301 |
+
const int head_size_rounded, const float p_dropout,
|
| 302 |
+
const int num_splits, const int num_sm, struct c10::TensorOptions opts) {
|
| 303 |
+
|
| 304 |
+
// This needs to match with run_mha_fwd_splitkv_dispatch
|
| 305 |
+
const int block_n = head_size <= 64 ? 256 : (head_size <= 128 ? 128 : 64);
|
| 306 |
+
const int num_n_blocks = (max_seqlen_k + block_n - 1) / block_n;
|
| 307 |
+
// Technically kBlockM = 64 only for the splitKV kernels, not the standard kernel.
|
| 308 |
+
// In any case we don't expect seqlen_q to be larger than 64 for inference.
|
| 309 |
+
const int num_m_blocks = (max_seqlen_q + 64 - 1) / 64;
|
| 310 |
+
params.num_splits = num_splits;
|
| 311 |
+
at::Tensor softmax_lse_accum;
|
| 312 |
+
at::Tensor out_accum;
|
| 313 |
+
|
| 314 |
+
if (p_dropout == 0.0f) { // SplitKV is not implemented for dropout
|
| 315 |
+
if (num_splits < 1) {
|
| 316 |
+
// We multiply number of SMs by 2 to hard-code the fact that we're using 128 threads per block.
|
| 317 |
+
params.num_splits = num_splits_heuristic(batch_size * num_heads * num_m_blocks, num_sm * 2, num_n_blocks, 128);
|
| 318 |
+
}
|
| 319 |
+
if (params.num_splits > 1) {
|
| 320 |
+
softmax_lse_accum = torch::empty({params.num_splits, batch_size, num_heads, max_seqlen_q}, opts.dtype(at::kFloat));
|
| 321 |
+
out_accum = torch::empty({params.num_splits, batch_size, num_heads, max_seqlen_q, head_size_rounded}, opts.dtype(at::kFloat));
|
| 322 |
+
params.softmax_lseaccum_ptr = softmax_lse_accum.data_ptr();
|
| 323 |
+
params.oaccum_ptr = out_accum.data_ptr();
|
| 324 |
+
}
|
| 325 |
+
TORCH_CHECK(params.num_splits <= 128, "num_splits > 128 not supported");
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
return std::make_tuple(softmax_lse_accum, out_accum);
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
// Attach optional ALiBi slopes to the kernel parameters.
// Accepts a fp32 CUDA tensor of shape (num_heads) or (batch_size, num_heads);
// for the per-head-only layout the batch stride is set to 0 so every batch
// element reuses the same slopes. Builds compiled with
// FLASHATTENTION_DISABLE_ALIBI reject any provided slopes.
void set_params_alibi(Flash_fwd_params &params, std::optional<at::Tensor> &alibi_slopes_, int batch_size, int num_heads){
#ifdef FLASHATTENTION_DISABLE_ALIBI
    TORCH_CHECK(!alibi_slopes_.has_value(), "This flash attention build does not support alibi.");
    params.alibi_slopes_ptr = nullptr;
#else
    if (alibi_slopes_.has_value()) {
        auto alibi_slopes = alibi_slopes_.value();
        TORCH_CHECK(alibi_slopes.dtype() == torch::kFloat32, "ALiBi slopes must have dtype fp32");
        CHECK_DEVICE(alibi_slopes);
        TORCH_CHECK(alibi_slopes.stride(-1) == 1, "ALiBi slopes tensor must have contiguous last dimension");
        TORCH_CHECK(alibi_slopes.sizes() == torch::IntArrayRef({num_heads}) || alibi_slopes.sizes() == torch::IntArrayRef({batch_size, num_heads}));
        params.alibi_slopes_ptr = alibi_slopes.data_ptr();
        // 2-D slopes vary per batch element; 1-D slopes are broadcast (stride 0).
        params.alibi_slopes_batch_stride = alibi_slopes.dim() == 2 ? alibi_slopes.stride(0) : 0;
    } else {
        params.alibi_slopes_ptr = nullptr;
    }
#endif
}
|
| 349 |
+
|
| 350 |
+
// FlashAttention forward pass for fixed-length (padded) batches.
// q: (batch, seqlen_q, num_heads, head_size); k/v: (batch, seqlen_k,
// num_heads_k, head_size), head_size a multiple of 8 and <= 256.
// Returns {out, softmax_lse, p, rng_state} where p is non-empty only when
// return_softmax (dropout debugging) is requested, and rng_state is the
// 2-int64 philox seed/offset the kernel writes back for the backward pass.
std::vector<at::Tensor>
mha_fwd(at::Tensor &q,         // batch_size x seqlen_q x num_heads x round_multiple(head_size, 8)
        const at::Tensor &k,         // batch_size x seqlen_k x num_heads_k x round_multiple(head_size, 8)
        const at::Tensor &v,         // batch_size x seqlen_k x num_heads_k x round_multiple(head_size, 8)
        std::optional<at::Tensor> &out_,             // batch_size x seqlen_q x num_heads x round_multiple(head_size, 8)
        std::optional<at::Tensor> &alibi_slopes_, // num_heads or batch_size x num_heads
        const float p_dropout,
        const float softmax_scale,
        bool is_causal,
        int window_size_left,
        int window_size_right,
        const float softcap,
        const bool return_softmax,
        std::optional<at::Generator> gen_) {

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{q.device()};

    auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
    bool is_sm8x_min = cc_major >= 8;
    TORCH_CHECK(is_sm8x_min, "FlashAttention only supports Ampere GPUs or newer.");

    auto q_dtype = q.dtype();
    TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
                "FlashAttention only support fp16 and bf16 data type");
    TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
    TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");

    CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);

    TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
    TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
    TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");

    const auto sizes = q.sizes();

    const int batch_size = sizes[0];
    int seqlen_q = sizes[1];     // mutable: may be swapped with the GQA group count below
    int num_heads = sizes[2];    // mutable: may collapse to num_heads_k below
    const int head_size = sizes[3];
    const int seqlen_k = k.size(1);
    const int num_heads_k = k.size(2);
    TORCH_CHECK(batch_size > 0, "batch size must be positive");
    TORCH_CHECK(head_size <= 256, "FlashAttention forward only supports head dimension at most 256");
    TORCH_CHECK(head_size % 8 == 0, "query, key, value, and out_ must have a head_size that is a multiple of 8");
    TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");

    if (softcap > 0.f) { TORCH_CHECK(p_dropout == 0.f, "Softcapping does not support dropout for now"); }

    // Windows covering the whole sequence are equivalent to "unbounded" (-1).
    if (window_size_left >= seqlen_k) { window_size_left = -1; }
    if (window_size_right >= seqlen_k) { window_size_right = -1; }

    // causal=true is the same as causal=false in this case
    if (seqlen_q == 1 && !alibi_slopes_.has_value()) { is_causal = false; }
    if (is_causal) { window_size_right = 0; }

    // Faster to transpose q from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d) in this case
    // H/t Daniel Haziza
    const int seqlenq_ngroups_swapped = seqlen_q == 1 && num_heads > num_heads_k && window_size_left < 0 && window_size_right < 0 && p_dropout == 0.f && head_size % 8 == 0 && !alibi_slopes_.has_value();
    const int ngroups = num_heads / num_heads_k;
    if (seqlenq_ngroups_swapped) {
        // Decode-style GQA optimization: treat the group dimension as seqlen_q.
        // The layout is undone just before returning.
        q = q.reshape({batch_size, num_heads_k, ngroups, head_size}).transpose(1, 2);
        seqlen_q = ngroups;
        num_heads = num_heads_k;
    }

    CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size);
    CHECK_SHAPE(k, batch_size, seqlen_k, num_heads_k, head_size);
    CHECK_SHAPE(v, batch_size, seqlen_k, num_heads_k, head_size);

    at::Tensor out;
    if (out_.has_value()) {
        out = out_.value();
        TORCH_CHECK(out.dtype() == q_dtype, "Output must have the same dtype as inputs");
        CHECK_DEVICE(out);
        TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
        // Validate against the caller-visible (pre-swap) shape in sizes[].
        CHECK_SHAPE(out, batch_size, sizes[1], sizes[2], head_size);
        if (seqlenq_ngroups_swapped) {
            out = out.reshape({batch_size, num_heads_k, ngroups, head_size}).transpose(1, 2);
        }
    } else {
        out = torch::empty_like(q);
    }

    auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
    const int head_size_rounded = round_multiple(head_size, head_size <= 128 ? 32 : 64);
    const int seqlen_q_rounded = round_multiple(seqlen_q, 128);
    const int seqlen_k_rounded = round_multiple(seqlen_k, 128);

    auto opts = q.options();

    auto softmax_lse = torch::empty({batch_size, num_heads, seqlen_q}, opts.dtype(at::kFloat));
    at::Tensor p;
    // Only return softmax if there's dropout to reduce compilation time
    if (return_softmax) {
        TORCH_CHECK(p_dropout > 0.0f, "return_softmax is only supported when p_dropout > 0.0");
        p = torch::empty({ batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded }, opts);
    }
    else {
        p = torch::empty({ 0 }, opts);
    }

    Flash_fwd_params params;
    set_params_fprop(params,
                     batch_size,
                     seqlen_q, seqlen_k,
                     seqlen_q_rounded, seqlen_k_rounded,
                     num_heads, num_heads_k,
                     head_size, head_size_rounded,
                     q, k, v, out,
                     /*cu_seqlens_q_d=*/nullptr,
                     /*cu_seqlens_k_d=*/nullptr,
                     /*seqused_k=*/nullptr,
                     return_softmax ? p.data_ptr() : nullptr,
                     softmax_lse.data_ptr(),
                     p_dropout,
                     softmax_scale,
                     window_size_left,
                     window_size_right,
                     softcap
                     );

    // Keep references to these tensors to extend their lifetime
    at::Tensor softmax_lse_accum, out_accum;
    std::tie(softmax_lse_accum, out_accum) = set_params_splitkv(
        params, batch_size, num_heads, head_size, seqlen_k, seqlen_q,
        head_size_rounded, p_dropout, /*num_splits*/ 0, get_num_sm(get_current_device()), opts);

    // number of times random will be generated per thread, to offset philox counter in thc random
    // state
    // We use a custom RNG that increases the offset by batch_size * nheads * 32.
    int64_t counter_offset = params.b * params.h * 32;
    auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
    auto rng_state = torch::empty({2}, options.dtype(torch::kInt64));
    // Forward kernel will populate memory with the seed and offset.
    params.rng_state = reinterpret_cast<uint64_t*>(rng_state.data_ptr());

    if (p_dropout > 0.0)  {
        auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
            gen_, at::cuda::detail::getDefaultCUDAGenerator());
        // See Note [Acquire lock when using random generators]
        std::lock_guard<std::mutex> lock(gen->mutex_);
        params.philox_args = gen->philox_cuda_state(counter_offset);
    }

    set_params_alibi(params, alibi_slopes_, batch_size, num_heads);

    if (seqlen_k > 0) {
        auto stream = at::cuda::getCurrentCUDAStream().stream();
        run_mha_fwd(params, stream);
    } else {
        // If seqlen_k == 0, then we have an empty tensor. We need to set the output to 0.
        out.zero_();
        softmax_lse.fill_(std::numeric_limits<float>::infinity());
    }

    if (seqlenq_ngroups_swapped) {
        // Undo the decode-style GQA layout swap performed above so the caller
        // sees the original (b, 1, num_heads, d) shapes.
        out = out.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size});
        q = q.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size});
        softmax_lse = softmax_lse.reshape({batch_size, num_heads_k * seqlen_q, 1});
    }
    return {out, softmax_lse, p, rng_state};
}
|
| 513 |
+
|
| 514 |
+
std::vector<at::Tensor>
|
| 515 |
+
mha_varlen_fwd(at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
|
| 516 |
+
const at::Tensor &k, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
|
| 517 |
+
const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
|
| 518 |
+
std::optional<at::Tensor> &out_, // total_q x num_heads x head_size, total_k := \sum_{i=0}^{b} s_i
|
| 519 |
+
const at::Tensor &cu_seqlens_q, // b+1
|
| 520 |
+
const at::Tensor &cu_seqlens_k, // b+1
|
| 521 |
+
std::optional<at::Tensor> &seqused_k, // b. If given, only this many elements of each batch element's keys are used.
|
| 522 |
+
std::optional<const at::Tensor> &leftpad_k_, // batch_size
|
| 523 |
+
std::optional<at::Tensor> &block_table_, // batch_size x max_num_blocks_per_seq
|
| 524 |
+
std::optional<at::Tensor> &alibi_slopes_, // num_heads or b x num_heads
|
| 525 |
+
int max_seqlen_q,
|
| 526 |
+
const int max_seqlen_k,
|
| 527 |
+
const float p_dropout,
|
| 528 |
+
const float softmax_scale,
|
| 529 |
+
const bool zero_tensors,
|
| 530 |
+
bool is_causal,
|
| 531 |
+
int window_size_left,
|
| 532 |
+
int window_size_right,
|
| 533 |
+
const float softcap,
|
| 534 |
+
const bool return_softmax,
|
| 535 |
+
std::optional<at::Generator> gen_) {
|
| 536 |
+
|
| 537 |
+
// Otherwise the kernel will be launched from cuda:0 device
|
| 538 |
+
at::cuda::CUDAGuard device_guard{q.device()};
|
| 539 |
+
|
| 540 |
+
auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
|
| 541 |
+
bool is_sm8x_min = cc_major >= 8;
|
| 542 |
+
TORCH_CHECK(is_sm8x_min, "FlashAttention only supports Ampere GPUs or newer.");
|
| 543 |
+
|
| 544 |
+
auto q_dtype = q.dtype();
|
| 545 |
+
TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
|
| 546 |
+
"FlashAttention only support fp16 and bf16 data type");
|
| 547 |
+
TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
|
| 548 |
+
TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
|
| 549 |
+
TORCH_CHECK(cu_seqlens_q.dtype() == torch::kInt32, "cu_seqlens_q must have dtype int32");
|
| 550 |
+
TORCH_CHECK(cu_seqlens_k.dtype() == torch::kInt32, "cu_seqlens_k must have dtype int32");
|
| 551 |
+
|
| 552 |
+
CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
|
| 553 |
+
CHECK_DEVICE(cu_seqlens_q);
|
| 554 |
+
CHECK_DEVICE(cu_seqlens_k);
|
| 555 |
+
|
| 556 |
+
at::Tensor block_table;
|
| 557 |
+
const bool paged_KV = block_table_.has_value();
|
| 558 |
+
if (paged_KV) {
|
| 559 |
+
block_table = block_table_.value();
|
| 560 |
+
CHECK_DEVICE(block_table);
|
| 561 |
+
TORCH_CHECK(block_table.dtype() == torch::kInt32, "block_table must have dtype torch.int32");
|
| 562 |
+
TORCH_CHECK(block_table.stride(-1) == 1, "block_table must have contiguous last dimension");
|
| 563 |
+
}
|
| 564 |
+
|
| 565 |
+
TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 566 |
+
TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 567 |
+
TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 568 |
+
CHECK_CONTIGUOUS(cu_seqlens_q);
|
| 569 |
+
CHECK_CONTIGUOUS(cu_seqlens_k);
|
| 570 |
+
|
| 571 |
+
const auto sizes = q.sizes();
|
| 572 |
+
|
| 573 |
+
const int batch_size = cu_seqlens_q.numel() - 1;
|
| 574 |
+
int num_heads = sizes[1];
|
| 575 |
+
const int head_size = sizes[2];
|
| 576 |
+
const int num_heads_k = paged_KV ? k.size(2) : k.size(1);
|
| 577 |
+
|
| 578 |
+
if (softcap > 0.f) { TORCH_CHECK(p_dropout == 0.f, "Softcapping does not support dropout for now"); }
|
| 579 |
+
|
| 580 |
+
const int max_num_blocks_per_seq = !paged_KV ? 0 : block_table.size(1);
|
| 581 |
+
const int num_blocks = !paged_KV ? 0 : k.size(0);
|
| 582 |
+
const int page_block_size = !paged_KV ? 1 : k.size(1);
|
| 583 |
+
TORCH_CHECK(!paged_KV || page_block_size % 256 == 0, "Paged KV cache block size must be divisible by 256");
|
| 584 |
+
|
| 585 |
+
if (max_seqlen_q == 1 && !alibi_slopes_.has_value()) { is_causal = false; } // causal=true is the same as causal=false in this case
|
| 586 |
+
if (is_causal) { window_size_right = 0; }
|
| 587 |
+
|
| 588 |
+
void *cu_seqlens_q_d = cu_seqlens_q.data_ptr();
|
| 589 |
+
|
| 590 |
+
// Faster to transpose q from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d) in this case
|
| 591 |
+
// H/t Daniel Haziza
|
| 592 |
+
const int seqlenq_ngroups_swapped = max_seqlen_q == 1 && num_heads > num_heads_k && window_size_left < 0 && window_size_right < 0 && p_dropout == 0.f && head_size % 8 == 0 && !alibi_slopes_.has_value();
|
| 593 |
+
const int ngroups = num_heads / num_heads_k;
|
| 594 |
+
if (seqlenq_ngroups_swapped) {
|
| 595 |
+
q = q.reshape({batch_size, num_heads_k, ngroups, head_size}).transpose(1, 2).reshape({batch_size * ngroups, num_heads_k, head_size});
|
| 596 |
+
max_seqlen_q = ngroups;
|
| 597 |
+
num_heads = num_heads_k;
|
| 598 |
+
cu_seqlens_q_d = nullptr;
|
| 599 |
+
}
|
| 600 |
+
|
| 601 |
+
const int total_q = q.sizes()[0];
|
| 602 |
+
|
| 603 |
+
TORCH_CHECK(batch_size > 0, "batch size must be positive");
|
| 604 |
+
TORCH_CHECK(head_size <= 256, "FlashAttention forward only supports head dimension at most 256");
|
| 605 |
+
TORCH_CHECK(head_size % 8 == 0, "query, key, value, and out_ must have a head_size that is a multiple of 8");
|
| 606 |
+
TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
|
| 607 |
+
|
| 608 |
+
if (window_size_left >= max_seqlen_k) { window_size_left = -1; }
|
| 609 |
+
if (window_size_right >= max_seqlen_k) { window_size_right = -1; }
|
| 610 |
+
|
| 611 |
+
CHECK_SHAPE(q, total_q, num_heads, head_size);
|
| 612 |
+
if (!paged_KV) {
|
| 613 |
+
const int total_k = k.size(0);
|
| 614 |
+
CHECK_SHAPE(k, total_k, num_heads_k, head_size);
|
| 615 |
+
CHECK_SHAPE(v, total_k, num_heads_k, head_size);
|
| 616 |
+
} else {
|
| 617 |
+
CHECK_SHAPE(k, num_blocks, page_block_size, num_heads_k, head_size);
|
| 618 |
+
CHECK_SHAPE(v, num_blocks, page_block_size, num_heads_k, head_size);
|
| 619 |
+
CHECK_SHAPE(block_table, batch_size, max_num_blocks_per_seq);
|
| 620 |
+
}
|
| 621 |
+
|
| 622 |
+
CHECK_SHAPE(cu_seqlens_q, batch_size + 1);
|
| 623 |
+
CHECK_SHAPE(cu_seqlens_k, batch_size + 1);
|
| 624 |
+
if (seqused_k.has_value()){
|
| 625 |
+
auto seqused_k_ = seqused_k.value();
|
| 626 |
+
TORCH_CHECK(seqused_k_.dtype() == torch::kInt32, "seqused_k must have dtype int32");
|
| 627 |
+
TORCH_CHECK(seqused_k_.is_cuda(), "seqused_k must be on CUDA device");
|
| 628 |
+
TORCH_CHECK(seqused_k_.is_contiguous(), "seqused_k must be contiguous");
|
| 629 |
+
CHECK_SHAPE(seqused_k_, batch_size);
|
| 630 |
+
}
|
| 631 |
+
|
| 632 |
+
at::Tensor out;
|
| 633 |
+
if (out_.has_value()) {
|
| 634 |
+
out = out_.value();
|
| 635 |
+
TORCH_CHECK(out.dtype() == q_dtype, "Output must have the same dtype as inputs");
|
| 636 |
+
CHECK_DEVICE(out);
|
| 637 |
+
TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
|
| 638 |
+
CHECK_SHAPE(out, sizes[0], sizes[1], head_size);
|
| 639 |
+
if (seqlenq_ngroups_swapped) {
|
| 640 |
+
out = out.reshape({batch_size, num_heads_k, ngroups, head_size}).transpose(1, 2).reshape({batch_size * ngroups, num_heads_k, head_size});
|
| 641 |
+
}
|
| 642 |
+
} else {
|
| 643 |
+
out = torch::empty_like(q);
|
| 644 |
+
}
|
| 645 |
+
|
| 646 |
+
auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
|
| 647 |
+
const int head_size_rounded = round_multiple(head_size, head_size <= 128 ? 32 : 64);
|
| 648 |
+
const int seqlen_q_rounded = round_multiple(max_seqlen_q, 128);
|
| 649 |
+
const int seqlen_k_rounded = round_multiple(max_seqlen_k, 128);
|
| 650 |
+
|
| 651 |
+
auto opts = q.options();
|
| 652 |
+
auto softmax_lse = torch::empty({num_heads, total_q}, opts.dtype(at::kFloat));
|
| 653 |
+
at::Tensor p;
|
| 654 |
+
// Only return softmax if there's dropout to reduce compilation time
|
| 655 |
+
if (return_softmax) {
|
| 656 |
+
TORCH_CHECK(p_dropout > 0.0f, "return_softmax is only supported when p_dropout > 0.0");
|
| 657 |
+
p = torch::empty({ batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded }, opts);
|
| 658 |
+
}
|
| 659 |
+
else {
|
| 660 |
+
p = torch::empty({ 0 }, opts);
|
| 661 |
+
}
|
| 662 |
+
|
| 663 |
+
if (zero_tensors) {
|
| 664 |
+
out.zero_();
|
| 665 |
+
softmax_lse.fill_(-std::numeric_limits<float>::infinity());
|
| 666 |
+
if (return_softmax) {p.zero_();}
|
| 667 |
+
}
|
| 668 |
+
|
| 669 |
+
Flash_fwd_params params;
|
| 670 |
+
set_params_fprop(params,
|
| 671 |
+
batch_size,
|
| 672 |
+
max_seqlen_q, max_seqlen_k,
|
| 673 |
+
seqlen_q_rounded, seqlen_k_rounded,
|
| 674 |
+
num_heads, num_heads_k,
|
| 675 |
+
head_size, head_size_rounded,
|
| 676 |
+
q, k, v, out,
|
| 677 |
+
cu_seqlens_q_d,
|
| 678 |
+
cu_seqlens_k.data_ptr(),
|
| 679 |
+
seqused_k.has_value() ? seqused_k.value().data_ptr() : nullptr,
|
| 680 |
+
return_softmax ? p.data_ptr() : nullptr,
|
| 681 |
+
softmax_lse.data_ptr(),
|
| 682 |
+
p_dropout,
|
| 683 |
+
softmax_scale,
|
| 684 |
+
window_size_left,
|
| 685 |
+
window_size_right,
|
| 686 |
+
softcap,
|
| 687 |
+
seqlenq_ngroups_swapped,
|
| 688 |
+
/*unpadded_lse*/true);
|
| 689 |
+
params.total_q = total_q;
|
| 690 |
+
|
| 691 |
+
if (paged_KV) {
|
| 692 |
+
params.block_table = block_table.data_ptr<int>();
|
| 693 |
+
params.block_table_batch_stride = block_table.stride(0);
|
| 694 |
+
params.k_batch_stride = k.stride(0);
|
| 695 |
+
params.v_batch_stride = v.stride(0);
|
| 696 |
+
}
|
| 697 |
+
params.page_block_size = page_block_size;
|
| 698 |
+
// Keep references to these tensors to extend their lifetime
|
| 699 |
+
at::Tensor softmax_lse_accum, out_accum;
|
| 700 |
+
if (seqlenq_ngroups_swapped) {
|
| 701 |
+
// Only apply split-k for decoding
|
| 702 |
+
std::tie(softmax_lse_accum, out_accum) =
|
| 703 |
+
set_params_splitkv(params, batch_size, num_heads, head_size,
|
| 704 |
+
max_seqlen_k, max_seqlen_q, head_size_rounded,
|
| 705 |
+
p_dropout, /*num_splits*/ 0, get_num_sm(get_current_device()), opts);
|
| 706 |
+
}
|
| 707 |
+
|
| 708 |
+
if (leftpad_k_.has_value()) {
|
| 709 |
+
auto leftpad_k = leftpad_k_.value();
|
| 710 |
+
TORCH_CHECK(!paged_KV, "We don't support Paged KV and leftpad_k running at the same time yet");
|
| 711 |
+
TORCH_CHECK(leftpad_k.dtype() == torch::kInt32, "leftpad_k must have dtype int32");
|
| 712 |
+
CHECK_DEVICE(leftpad_k);
|
| 713 |
+
CHECK_CONTIGUOUS(leftpad_k);
|
| 714 |
+
CHECK_SHAPE(leftpad_k, batch_size);
|
| 715 |
+
params.leftpad_k = static_cast<int *>(leftpad_k.data_ptr());
|
| 716 |
+
}
|
| 717 |
+
|
| 718 |
+
// number of times random will be generated per thread, to offset philox counter in thc random
|
| 719 |
+
// state
|
| 720 |
+
// We use a custom RNG that increases the offset by batch_size * nheads * 32.
|
| 721 |
+
int64_t counter_offset = params.b * params.h * 32;
|
| 722 |
+
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
|
| 723 |
+
auto rng_state = torch::empty({2}, options.dtype(torch::kInt64));
|
| 724 |
+
// Forward kernel will populate memory with the seed and offset.
|
| 725 |
+
params.rng_state = reinterpret_cast<uint64_t*>(rng_state.data_ptr());
|
| 726 |
+
|
| 727 |
+
if (p_dropout > 0.0) {
|
| 728 |
+
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
|
| 729 |
+
gen_, at::cuda::detail::getDefaultCUDAGenerator());
|
| 730 |
+
// See Note [Acquire lock when using random generators]
|
| 731 |
+
std::lock_guard<std::mutex> lock(gen->mutex_);
|
| 732 |
+
params.philox_args = gen->philox_cuda_state(counter_offset);
|
| 733 |
+
}
|
| 734 |
+
|
| 735 |
+
set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
|
| 736 |
+
|
| 737 |
+
if (max_seqlen_k > 0) {
|
| 738 |
+
auto stream = at::cuda::getCurrentCUDAStream().stream();
|
| 739 |
+
run_mha_fwd(params, stream, paged_KV);
|
| 740 |
+
} else {
|
| 741 |
+
// If seqlen_k == 0, then we have an empty tensor. We need to set the output to 0.
|
| 742 |
+
out.zero_();
|
| 743 |
+
softmax_lse.fill_(std::numeric_limits<float>::infinity());
|
| 744 |
+
}
|
| 745 |
+
|
| 746 |
+
if (seqlenq_ngroups_swapped) {
|
| 747 |
+
int64_t size_before[] = {batch_size, max_seqlen_q, num_heads_k, head_size};
|
| 748 |
+
int64_t size_after[] = {batch_size, num_heads_k * max_seqlen_q, head_size};
|
| 749 |
+
out = out.reshape(size_before).transpose(1, 2).reshape(size_after);
|
| 750 |
+
q = q.reshape(size_before).transpose(1, 2).reshape(size_after);
|
| 751 |
+
softmax_lse = softmax_lse.reshape({num_heads * max_seqlen_q, batch_size});
|
| 752 |
+
}
|
| 753 |
+
|
| 754 |
+
return {out, softmax_lse, p, rng_state};
|
| 755 |
+
}
|
| 756 |
+
|
| 757 |
+
void run_mha_bwd(Flash_bwd_params ¶ms, cudaStream_t stream) {
|
| 758 |
+
FP16_SWITCH(!params.is_bf16, [&] {
|
| 759 |
+
HEADDIM_SWITCH(params.d, [&] {
|
| 760 |
+
BOOL_SWITCH(params.is_causal, Is_causal, [&] {
|
| 761 |
+
run_mha_bwd_<elem_type, kHeadDim, Is_causal>(params, stream);
|
| 762 |
+
});
|
| 763 |
+
});
|
| 764 |
+
});
|
| 765 |
+
}
|
| 766 |
+
|
| 767 |
+
std::vector<at::Tensor>
|
| 768 |
+
mha_bwd(const at::Tensor &dout, // batch_size x seqlen_q x num_heads, x multiple_of(head_size_og, 8)
|
| 769 |
+
const at::Tensor &q, // batch_size x seqlen_q x num_heads x head_size
|
| 770 |
+
const at::Tensor &k, // batch_size x seqlen_k x num_heads_k x head_size
|
| 771 |
+
const at::Tensor &v, // batch_size x seqlen_k x num_heads_k x head_size
|
| 772 |
+
const at::Tensor &out, // batch_size x seqlen_q x num_heads x head_size
|
| 773 |
+
const at::Tensor &softmax_lse, // b x h x seqlen_q
|
| 774 |
+
std::optional<at::Tensor> &dq_, // batch_size x seqlen_q x num_heads x head_size
|
| 775 |
+
std::optional<at::Tensor> &dk_, // batch_size x seqlen_k x num_heads_k x head_size
|
| 776 |
+
std::optional<at::Tensor> &dv_, // batch_size x seqlen_k x num_heads_k x head_size
|
| 777 |
+
std::optional<at::Tensor> &alibi_slopes_, // num_heads or batch_size x num_heads
|
| 778 |
+
const float p_dropout, // probability to drop
|
| 779 |
+
const float softmax_scale,
|
| 780 |
+
const bool is_causal,
|
| 781 |
+
int window_size_left,
|
| 782 |
+
int window_size_right,
|
| 783 |
+
const float softcap,
|
| 784 |
+
const bool deterministic,
|
| 785 |
+
std::optional<at::Generator> gen_,
|
| 786 |
+
std::optional<at::Tensor> &rng_state) {
|
| 787 |
+
|
| 788 |
+
#ifdef FLASHATTENTION_DISABLE_BACKWARD
|
| 789 |
+
TORCH_CHECK(false, "This flash attention build does not support backward.");
|
| 790 |
+
#endif
|
| 791 |
+
if (is_causal) { window_size_right = 0; }
|
| 792 |
+
|
| 793 |
+
// Otherwise the kernel will be launched from cuda:0 device
|
| 794 |
+
at::cuda::CUDAGuard device_guard{q.device()};
|
| 795 |
+
|
| 796 |
+
auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
|
| 797 |
+
bool is_sm8x_min = cc_major >= 8;
|
| 798 |
+
TORCH_CHECK(is_sm8x_min, "FlashAttention only supports Ampere GPUs or newer.");
|
| 799 |
+
|
| 800 |
+
bool is_dropout = p_dropout > 0.0;
|
| 801 |
+
auto stream = at::cuda::getCurrentCUDAStream().stream();
|
| 802 |
+
|
| 803 |
+
auto q_dtype = q.dtype();
|
| 804 |
+
TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
|
| 805 |
+
"FlashAttention only support fp16 and bf16 data type");
|
| 806 |
+
TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
|
| 807 |
+
TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
|
| 808 |
+
TORCH_CHECK(out.dtype() == q_dtype, "query and out must have the same dtype");
|
| 809 |
+
TORCH_CHECK(dout.dtype() == q_dtype, "query and dout must have the same dtype");
|
| 810 |
+
|
| 811 |
+
CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
|
| 812 |
+
CHECK_DEVICE(out); CHECK_DEVICE(dout); CHECK_DEVICE(softmax_lse);
|
| 813 |
+
|
| 814 |
+
TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 815 |
+
TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 816 |
+
TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 817 |
+
TORCH_CHECK(out.stride(-1) == 1, "out tensor must have contiguous last dimension");
|
| 818 |
+
TORCH_CHECK(dout.stride(-1) == 1, "dout tensor must have contiguous last dimension");
|
| 819 |
+
|
| 820 |
+
const auto sizes = q.sizes();
|
| 821 |
+
|
| 822 |
+
const int batch_size = sizes[0];
|
| 823 |
+
const int seqlen_q = sizes[1];
|
| 824 |
+
const int num_heads = sizes[2];
|
| 825 |
+
const int head_size = sizes[3];
|
| 826 |
+
const int seqlen_k = k.size(1);
|
| 827 |
+
const int num_heads_k = k.size(2);
|
| 828 |
+
TORCH_CHECK(batch_size > 0, "batch size must be positive");
|
| 829 |
+
TORCH_CHECK(head_size % 8 == 0, "head_size should be a multiple of 8");
|
| 830 |
+
TORCH_CHECK(head_size <= 256, "FlashAttention backward only supports head dimension at most 256");
|
| 831 |
+
TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
|
| 832 |
+
|
| 833 |
+
auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
|
| 834 |
+
const int head_size_rounded = round_multiple(head_size, head_size <= 128 ? 32 : 64);
|
| 835 |
+
const int seqlen_q_rounded = round_multiple(seqlen_q, 128);
|
| 836 |
+
const int seqlen_k_rounded = round_multiple(seqlen_k, 128);
|
| 837 |
+
|
| 838 |
+
if (softcap > 0.f) { TORCH_CHECK(p_dropout == 0.f, "Softcapping does not support dropout for now"); }
|
| 839 |
+
|
| 840 |
+
if (window_size_left >= seqlen_k) { window_size_left = -1; }
|
| 841 |
+
if (window_size_right >= seqlen_k) { window_size_right = -1; }
|
| 842 |
+
|
| 843 |
+
CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size);
|
| 844 |
+
CHECK_SHAPE(k, batch_size, seqlen_k, num_heads_k, head_size);
|
| 845 |
+
CHECK_SHAPE(v, batch_size, seqlen_k, num_heads_k, head_size);
|
| 846 |
+
CHECK_SHAPE(out, batch_size, seqlen_q, num_heads, head_size);
|
| 847 |
+
CHECK_SHAPE(dout, batch_size, seqlen_q, num_heads, head_size);
|
| 848 |
+
|
| 849 |
+
at::Tensor dq, dk, dv;
|
| 850 |
+
if (dq_.has_value()) {
|
| 851 |
+
dq = dq_.value();
|
| 852 |
+
TORCH_CHECK(dq.dtype() == q_dtype, "dq must have the same dtype as q");
|
| 853 |
+
CHECK_DEVICE(dq);
|
| 854 |
+
TORCH_CHECK(dq.stride(-1) == 1, "dq must have contiguous last dimension");
|
| 855 |
+
CHECK_SHAPE(dq, batch_size, seqlen_q, num_heads, head_size);
|
| 856 |
+
} else {
|
| 857 |
+
dq = torch::empty_like(q);
|
| 858 |
+
}
|
| 859 |
+
if (dk_.has_value()) {
|
| 860 |
+
dk = dk_.value();
|
| 861 |
+
TORCH_CHECK(dk.dtype() == q_dtype, "dk must have the same dtype as q");
|
| 862 |
+
CHECK_DEVICE(dk);
|
| 863 |
+
TORCH_CHECK(dk.stride(-1) == 1, "dk must have contiguous last dimension");
|
| 864 |
+
CHECK_SHAPE(dk, batch_size, seqlen_k, num_heads_k, head_size);
|
| 865 |
+
} else {
|
| 866 |
+
dk = torch::empty_like(k);
|
| 867 |
+
}
|
| 868 |
+
if (dv_.has_value()) {
|
| 869 |
+
dv = dv_.value();
|
| 870 |
+
TORCH_CHECK(dv.dtype() == q_dtype, "dv must have the same dtype as q");
|
| 871 |
+
CHECK_DEVICE(dv);
|
| 872 |
+
TORCH_CHECK(dv.stride(-1) == 1, "dv must have contiguous last dimension");
|
| 873 |
+
CHECK_SHAPE(dv, batch_size, seqlen_k, num_heads_k, head_size);
|
| 874 |
+
} else {
|
| 875 |
+
dv = torch::empty_like(v);
|
| 876 |
+
}
|
| 877 |
+
|
| 878 |
+
// bool loop = seqlen_k > blocksize_c;
|
| 879 |
+
// TODO: change later, for now set to true for simplicity
|
| 880 |
+
bool loop = true;
|
| 881 |
+
|
| 882 |
+
auto opts = q.options();
|
| 883 |
+
auto softmax_d = torch::empty({batch_size, num_heads, seqlen_q_rounded}, opts.dtype(at::kFloat));
|
| 884 |
+
at::Tensor dq_accum;
|
| 885 |
+
at::Tensor dk_accum, dv_accum;
|
| 886 |
+
if (loop) {
|
| 887 |
+
if (!deterministic) {
|
| 888 |
+
dq_accum = torch::empty({batch_size, seqlen_q_rounded, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
|
| 889 |
+
} else {
|
| 890 |
+
const int nsplits = (get_num_sm(get_current_device()) + batch_size * num_heads - 1) / (batch_size * num_heads);
|
| 891 |
+
dq_accum = torch::zeros({nsplits, batch_size, seqlen_q_rounded, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
|
| 892 |
+
}
|
| 893 |
+
// dk_accum = torch::empty({batch_size, num_heads_k, seqlen_k_rounded, head_size_rounded}, opts.dtype(at::kFloat));
|
| 894 |
+
// dv_accum = torch::empty({batch_size, num_heads_k, seqlen_k_rounded, head_size_rounded}, opts.dtype(at::kFloat));
|
| 895 |
+
}
|
| 896 |
+
|
| 897 |
+
at::Tensor dk_expanded, dv_expanded;
|
| 898 |
+
if (num_heads_k != num_heads) { // MQA / GQA
|
| 899 |
+
dk_expanded = torch::empty({batch_size, seqlen_k, num_heads, head_size}, opts);
|
| 900 |
+
dv_expanded = torch::empty({batch_size, seqlen_k, num_heads, head_size}, opts);
|
| 901 |
+
} else {
|
| 902 |
+
dk_expanded = dk;
|
| 903 |
+
dv_expanded = dv;
|
| 904 |
+
}
|
| 905 |
+
|
| 906 |
+
Flash_bwd_params params;
|
| 907 |
+
|
| 908 |
+
set_params_dgrad(params,
|
| 909 |
+
batch_size,
|
| 910 |
+
seqlen_q, seqlen_k,
|
| 911 |
+
seqlen_q_rounded, seqlen_k_rounded,
|
| 912 |
+
num_heads, num_heads_k,
|
| 913 |
+
head_size, head_size_rounded,
|
| 914 |
+
q, k, v, out,
|
| 915 |
+
dout, dq, dk_expanded, dv_expanded,
|
| 916 |
+
nullptr,
|
| 917 |
+
nullptr,
|
| 918 |
+
loop ? dq_accum.data_ptr() : nullptr,
|
| 919 |
+
// loop ? dk_accum.data_ptr() : nullptr,
|
| 920 |
+
// loop ? dv_accum.data_ptr() : nullptr,
|
| 921 |
+
nullptr,
|
| 922 |
+
nullptr,
|
| 923 |
+
softmax_lse.data_ptr(),
|
| 924 |
+
softmax_d.data_ptr(),
|
| 925 |
+
p_dropout,
|
| 926 |
+
softmax_scale,
|
| 927 |
+
window_size_left,
|
| 928 |
+
window_size_right,
|
| 929 |
+
softcap,
|
| 930 |
+
deterministic,
|
| 931 |
+
/*unpadded_lse*/false);
|
| 932 |
+
params.dq_accum_split_stride = !deterministic ? 0 : dq_accum.stride(0);
|
| 933 |
+
|
| 934 |
+
auto launch = &run_mha_bwd;
|
| 935 |
+
|
| 936 |
+
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
|
| 937 |
+
gen_, at::cuda::detail::getDefaultCUDAGenerator());
|
| 938 |
+
|
| 939 |
+
// We use a custom RNG that increases the offset by batch_size * nheads * 32.
|
| 940 |
+
int64_t counter_offset = params.b * params.h * 32;
|
| 941 |
+
|
| 942 |
+
if ( rng_state.has_value() ) {
|
| 943 |
+
params.rng_state = reinterpret_cast<uint64_t*>(rng_state.value().data_ptr());
|
| 944 |
+
} else if( is_dropout ) {
|
| 945 |
+
// See Note [Acquire lock when using random generators]
|
| 946 |
+
std::lock_guard<std::mutex> lock(gen->mutex_);
|
| 947 |
+
params.philox_args = gen->philox_cuda_state(counter_offset);
|
| 948 |
+
auto seeds = at::cuda::philox::unpack(params.philox_args);
|
| 949 |
+
params.rng_state[0] = std::get<0>(seeds);
|
| 950 |
+
params.rng_state[1] = std::get<1>(seeds);
|
| 951 |
+
}
|
| 952 |
+
|
| 953 |
+
set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
|
| 954 |
+
|
| 955 |
+
if (seqlen_q > 0) {
|
| 956 |
+
launch(params, stream);
|
| 957 |
+
} else {
|
| 958 |
+
// If seqlen_q == 0, then we have an empty tensor. We need to set the output to 0.
|
| 959 |
+
dk_expanded.zero_();
|
| 960 |
+
dv_expanded.zero_();
|
| 961 |
+
softmax_d.zero_();
|
| 962 |
+
}
|
| 963 |
+
|
| 964 |
+
// For MQA/GQA we need to sum dK and dV across the groups
|
| 965 |
+
if (num_heads_k != num_heads) {
|
| 966 |
+
at::sum_out(dk, at::reshape(dk_expanded, {batch_size, seqlen_k, num_heads_k, num_heads / num_heads_k, head_size}), {3});
|
| 967 |
+
at::sum_out(dv, at::reshape(dv_expanded, {batch_size, seqlen_k, num_heads_k, num_heads / num_heads_k, head_size}), {3});
|
| 968 |
+
}
|
| 969 |
+
|
| 970 |
+
return { dq, dk, dv, softmax_d };
|
| 971 |
+
}
|
| 972 |
+
|
| 973 |
+
std::vector<at::Tensor>
|
| 974 |
+
mha_varlen_bwd(const at::Tensor &dout, // total_q x num_heads, x head_size
|
| 975 |
+
const at::Tensor &q, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
|
| 976 |
+
const at::Tensor &k, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
|
| 977 |
+
const at::Tensor &v, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
|
| 978 |
+
const at::Tensor &out, // total_q x num_heads x head_size
|
| 979 |
+
const at::Tensor &softmax_lse, // h x total_q, softmax logsumexp
|
| 980 |
+
std::optional<at::Tensor> &dq_, // total_q x num_heads x head_size, total_q := \sum_{i=0}^{b} s_i
|
| 981 |
+
std::optional<at::Tensor> &dk_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
|
| 982 |
+
std::optional<at::Tensor> &dv_, // total_k x num_heads_k x head_size, total_k := \sum_{i=0}^{b} s_i
|
| 983 |
+
const at::Tensor &cu_seqlens_q, // b+1
|
| 984 |
+
const at::Tensor &cu_seqlens_k, // b+1
|
| 985 |
+
std::optional<at::Tensor> &alibi_slopes_, // num_heads or b x num_heads
|
| 986 |
+
const int max_seqlen_q,
|
| 987 |
+
const int max_seqlen_k, // max sequence length to choose the kernel
|
| 988 |
+
const float p_dropout, // probability to drop
|
| 989 |
+
const float softmax_scale,
|
| 990 |
+
const bool zero_tensors,
|
| 991 |
+
const bool is_causal,
|
| 992 |
+
int window_size_left,
|
| 993 |
+
int window_size_right,
|
| 994 |
+
const float softcap,
|
| 995 |
+
const bool deterministic,
|
| 996 |
+
std::optional<at::Generator> gen_,
|
| 997 |
+
std::optional<at::Tensor> &rng_state) {
|
| 998 |
+
|
| 999 |
+
#ifdef FLASHATTENTION_DISABLE_BACKWARD
|
| 1000 |
+
TORCH_CHECK(false, "This flash attention build does not support backward.");
|
| 1001 |
+
#endif
|
| 1002 |
+
if (is_causal) { window_size_right = 0; }
|
| 1003 |
+
|
| 1004 |
+
// Otherwise the kernel will be launched from cuda:0 device
|
| 1005 |
+
at::cuda::CUDAGuard device_guard{q.device()};
|
| 1006 |
+
|
| 1007 |
+
auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
|
| 1008 |
+
bool is_sm8x_min = cc_major >= 8;
|
| 1009 |
+
TORCH_CHECK(is_sm8x_min, "FlashAttention only supports Ampere GPUs or newer.");
|
| 1010 |
+
|
| 1011 |
+
bool is_dropout = p_dropout > 0.0;
|
| 1012 |
+
auto stream = at::cuda::getCurrentCUDAStream().stream();
|
| 1013 |
+
|
| 1014 |
+
auto q_dtype = q.dtype();
|
| 1015 |
+
TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
|
| 1016 |
+
"FlashAttention only support fp16 and bf16 data type");
|
| 1017 |
+
TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
|
| 1018 |
+
TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
|
| 1019 |
+
TORCH_CHECK(out.dtype() == q_dtype, "query and out must have the same dtype");
|
| 1020 |
+
TORCH_CHECK(dout.dtype() == q_dtype, "query and dout must have the same dtype");
|
| 1021 |
+
TORCH_CHECK(cu_seqlens_q.dtype() == torch::kInt32, "cu_seqlens_q must have dtype int32");
|
| 1022 |
+
TORCH_CHECK(cu_seqlens_k.dtype() == torch::kInt32, "cu_seqlens_k must have dtype int32");
|
| 1023 |
+
|
| 1024 |
+
CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
|
| 1025 |
+
CHECK_DEVICE(out); CHECK_DEVICE(dout); CHECK_DEVICE(softmax_lse);
|
| 1026 |
+
CHECK_DEVICE(cu_seqlens_q); CHECK_DEVICE(cu_seqlens_k);
|
| 1027 |
+
|
| 1028 |
+
TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 1029 |
+
TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 1030 |
+
TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
|
| 1031 |
+
TORCH_CHECK(out.stride(-1) == 1, "out tensor must have contiguous last dimension");
|
| 1032 |
+
TORCH_CHECK(dout.stride(-1) == 1, "dout tensor must have contiguous last dimension");
|
| 1033 |
+
CHECK_CONTIGUOUS(cu_seqlens_q);
|
| 1034 |
+
CHECK_CONTIGUOUS(cu_seqlens_k);
|
| 1035 |
+
|
| 1036 |
+
const auto sizes = q.sizes();
|
| 1037 |
+
|
| 1038 |
+
const int total_q = sizes[0];
|
| 1039 |
+
const int batch_size = cu_seqlens_q.numel() - 1;
|
| 1040 |
+
const int num_heads = sizes[1];
|
| 1041 |
+
const int head_size = sizes[2];
|
| 1042 |
+
const int total_k = k.size(0);
|
| 1043 |
+
const int num_heads_k = k.size(1);
|
| 1044 |
+
TORCH_CHECK(batch_size > 0, "batch size must be positive");
|
| 1045 |
+
TORCH_CHECK(head_size % 8 == 0, "head_size should be a multiple of 8");
|
| 1046 |
+
TORCH_CHECK(head_size <= 256, "FlashAttention backward only supports head dimension at most 256");
|
| 1047 |
+
TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
|
| 1048 |
+
if (softcap > 0.f) { TORCH_CHECK(p_dropout == 0.f, "Softcapping does not support dropout for now"); }
|
| 1049 |
+
|
| 1050 |
+
auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
|
| 1051 |
+
const int head_size_rounded = round_multiple(head_size, head_size <= 128 ? 32 : 64);
|
| 1052 |
+
const int seqlen_q_rounded = round_multiple(max_seqlen_q, 128);
|
| 1053 |
+
const int seqlen_k_rounded = round_multiple(max_seqlen_k, 128);
|
| 1054 |
+
|
| 1055 |
+
if (window_size_left >= max_seqlen_k) { window_size_left = -1; }
|
| 1056 |
+
if (window_size_right >= max_seqlen_k) { window_size_right = -1; }
|
| 1057 |
+
|
| 1058 |
+
CHECK_SHAPE(q, total_q, num_heads, head_size);
|
| 1059 |
+
CHECK_SHAPE(k, total_k, num_heads_k, head_size);
|
| 1060 |
+
CHECK_SHAPE(v, total_k, num_heads_k, head_size);
|
| 1061 |
+
CHECK_SHAPE(out, total_q, num_heads, head_size);
|
| 1062 |
+
CHECK_SHAPE(dout, total_q, num_heads, head_size);
|
| 1063 |
+
CHECK_SHAPE(cu_seqlens_q, batch_size + 1);
|
| 1064 |
+
CHECK_SHAPE(cu_seqlens_k, batch_size + 1);
|
| 1065 |
+
|
| 1066 |
+
at::Tensor dq, dk, dv;
|
| 1067 |
+
if (dq_.has_value()) {
|
| 1068 |
+
dq = dq_.value();
|
| 1069 |
+
TORCH_CHECK(dq.dtype() == q_dtype, "dq must have the same dtype as q");
|
| 1070 |
+
CHECK_DEVICE(dq);
|
| 1071 |
+
TORCH_CHECK(dq.stride(-1) == 1, "dq must have contiguous last dimension");
|
| 1072 |
+
CHECK_SHAPE(dq, total_q, num_heads, head_size);
|
| 1073 |
+
} else {
|
| 1074 |
+
dq = torch::empty_like(q);
|
| 1075 |
+
}
|
| 1076 |
+
if (dk_.has_value()) {
|
| 1077 |
+
dk = dk_.value();
|
| 1078 |
+
TORCH_CHECK(dk.dtype() == q_dtype, "dk must have the same dtype as q");
|
| 1079 |
+
CHECK_DEVICE(dk);
|
| 1080 |
+
TORCH_CHECK(dk.stride(-1) == 1, "dk must have contiguous last dimension");
|
| 1081 |
+
CHECK_SHAPE(dk, total_k, num_heads_k, head_size);
|
| 1082 |
+
} else {
|
| 1083 |
+
dk = torch::empty_like(k);
|
| 1084 |
+
}
|
| 1085 |
+
if (dv_.has_value()) {
|
| 1086 |
+
dv = dv_.value();
|
| 1087 |
+
TORCH_CHECK(dv.dtype() == q_dtype, "dv must have the same dtype as q");
|
| 1088 |
+
CHECK_DEVICE(dv);
|
| 1089 |
+
TORCH_CHECK(dv.stride(-1) == 1, "dv must have contiguous last dimension");
|
| 1090 |
+
CHECK_SHAPE(dv, total_k, num_heads_k, head_size);
|
| 1091 |
+
} else {
|
| 1092 |
+
dv = torch::empty_like(v);
|
| 1093 |
+
}
|
| 1094 |
+
|
| 1095 |
+
// bool loop = max_seqlen_k > blocksize_c;
|
| 1096 |
+
// TODO: change later, for now set to true for simplicity
|
| 1097 |
+
bool loop = true;
|
| 1098 |
+
|
| 1099 |
+
auto opts = q.options();
|
| 1100 |
+
auto softmax_d = torch::empty({num_heads, total_q + 128 * batch_size}, opts.dtype(at::kFloat));
|
| 1101 |
+
at::Tensor dq_accum;
|
| 1102 |
+
if (loop) {
|
| 1103 |
+
// We don't want to allocate dq_accum of size (batch, seqlen_q_rounded, num_heads, head_size_rounded)
|
| 1104 |
+
// because that would be too large if there is a very long sequence and the rest of the sequences are short.
|
| 1105 |
+
// Instead, we allocate dq_accum of size (total_q + 128 * batch, num_heads, head_size_rounded).
|
| 1106 |
+
// Note that 128 is the max block size on the seqlen_q dimension.
|
| 1107 |
+
// For dQ, the i-th sequence is stored in indices from cu_seqlens[i] + 128 * i to
|
| 1108 |
+
// cu_seqlens[i + 1] * 128 * i - 1. This ensures that the i-th sequence and (i + 1)-th sequence will
|
| 1109 |
+
// be at least 128 apart. It's ok for us to do atomicAdds up to 128 rows beyond what we're normally
|
| 1110 |
+
// allowed to do. So we won't have to do any bound checking, and performance should stay the same.
|
| 1111 |
+
// Same holds for softmax_d, since LSE is stored in unpadded format.
|
| 1112 |
+
if (!deterministic) {
|
| 1113 |
+
dq_accum = torch::empty({total_q + 128 * batch_size, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
|
| 1114 |
+
} else {
|
| 1115 |
+
const int nsplits = (get_num_sm(get_current_device()) + batch_size * num_heads - 1) / (batch_size * num_heads);
|
| 1116 |
+
dq_accum = torch::zeros({nsplits, total_q + 128 * batch_size, num_heads, head_size_rounded}, opts.dtype(at::kFloat));
|
| 1117 |
+
}
|
| 1118 |
+
}
|
| 1119 |
+
|
| 1120 |
+
at::Tensor dk_expanded, dv_expanded;
|
| 1121 |
+
if (num_heads_k != num_heads) { // MQA / GQA
|
| 1122 |
+
dk_expanded = torch::empty({total_k, num_heads, head_size}, opts);
|
| 1123 |
+
dv_expanded = torch::empty({total_k, num_heads, head_size}, opts);
|
| 1124 |
+
} else {
|
| 1125 |
+
dk_expanded = dk;
|
| 1126 |
+
dv_expanded = dv;
|
| 1127 |
+
}
|
| 1128 |
+
|
| 1129 |
+
if( zero_tensors ) {
|
| 1130 |
+
dq.zero_();
|
| 1131 |
+
dk_expanded.zero_();
|
| 1132 |
+
dv_expanded.zero_();
|
| 1133 |
+
softmax_d.zero_();
|
| 1134 |
+
}
|
| 1135 |
+
|
| 1136 |
+
Flash_bwd_params params;
|
| 1137 |
+
|
| 1138 |
+
set_params_dgrad(params,
|
| 1139 |
+
batch_size,
|
| 1140 |
+
max_seqlen_q, max_seqlen_k,
|
| 1141 |
+
seqlen_q_rounded, seqlen_k_rounded,
|
| 1142 |
+
num_heads, num_heads_k,
|
| 1143 |
+
head_size, head_size_rounded,
|
| 1144 |
+
q, k, v, out,
|
| 1145 |
+
dout, dq, dk_expanded, dv_expanded,
|
| 1146 |
+
cu_seqlens_q.data_ptr(),
|
| 1147 |
+
cu_seqlens_k.data_ptr(),
|
| 1148 |
+
loop ? dq_accum.data_ptr() : nullptr,
|
| 1149 |
+
nullptr,
|
| 1150 |
+
nullptr,
|
| 1151 |
+
softmax_lse.data_ptr(),
|
| 1152 |
+
softmax_d.data_ptr(),
|
| 1153 |
+
p_dropout,
|
| 1154 |
+
softmax_scale,
|
| 1155 |
+
window_size_left,
|
| 1156 |
+
window_size_right,
|
| 1157 |
+
softcap,
|
| 1158 |
+
deterministic,
|
| 1159 |
+
/*unpadded_lse*/true);
|
| 1160 |
+
params.dq_accum_split_stride = !deterministic ? 0 : dq_accum.stride(0);
|
| 1161 |
+
params.total_q = total_q;
|
| 1162 |
+
|
| 1163 |
+
auto launch = &run_mha_bwd;
|
| 1164 |
+
|
| 1165 |
+
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
|
| 1166 |
+
gen_, at::cuda::detail::getDefaultCUDAGenerator());
|
| 1167 |
+
|
| 1168 |
+
// We use a custom RNG that increases the offset by batch_size * nheads * 32.
|
| 1169 |
+
int64_t counter_offset = params.b * params.h * 32;
|
| 1170 |
+
|
| 1171 |
+
if ( rng_state.has_value() ) {
|
| 1172 |
+
params.rng_state = reinterpret_cast<uint64_t*>(rng_state.value().data_ptr());
|
| 1173 |
+
} else if( is_dropout ) {
|
| 1174 |
+
// See Note [Acquire lock when using random generators]
|
| 1175 |
+
std::lock_guard<std::mutex> lock(gen->mutex_);
|
| 1176 |
+
params.philox_args = gen->philox_cuda_state(counter_offset);
|
| 1177 |
+
auto seeds = at::cuda::philox::unpack(params.philox_args);
|
| 1178 |
+
params.rng_state[0] = std::get<0>(seeds);
|
| 1179 |
+
params.rng_state[1] = std::get<1>(seeds);
|
| 1180 |
+
}
|
| 1181 |
+
|
| 1182 |
+
set_params_alibi(params, alibi_slopes_, batch_size, num_heads);
|
| 1183 |
+
|
| 1184 |
+
if (max_seqlen_q > 0) {
|
| 1185 |
+
launch(params, stream);
|
| 1186 |
+
} else {
|
| 1187 |
+
// If seqlen_q == 0, then we have an empty tensor. We need to set the output to 0.
|
| 1188 |
+
dk_expanded.zero_();
|
| 1189 |
+
dv_expanded.zero_();
|
| 1190 |
+
softmax_d.zero_();
|
| 1191 |
+
}
|
| 1192 |
+
|
| 1193 |
+
// For MQA/GQA we need to sum dK and dV across the groups
|
| 1194 |
+
if (num_heads_k != num_heads) {
|
| 1195 |
+
at::sum_out(dk, at::reshape(dk_expanded, {total_k, num_heads_k, num_heads / num_heads_k, head_size}), {2});
|
| 1196 |
+
at::sum_out(dv, at::reshape(dv_expanded, {total_k, num_heads_k, num_heads / num_heads_k, head_size}), {2});
|
| 1197 |
+
}
|
| 1198 |
+
|
| 1199 |
+
return { dq, dk, dv, softmax_d };
|
| 1200 |
+
}
|
| 1201 |
+
|
| 1202 |
+
// Forward attention pass that reads from (and optionally appends to) a KV cache.
// Supports a contiguous cache (batch_size_c x seqlen_k x num_heads_k x head_size)
// or a paged cache addressed through block_table, optional rotary embedding of
// the newly appended K (and of Q), MQA/GQA (num_heads_k < num_heads), sliding
// window, and split-KV decoding via num_splits.
// Returns {out, softmax_lse}.
// NOTE(review): q/kcache/vcache layouts above are taken from the parameter
// comments; shapes are enforced by the CHECK_SHAPE calls below.
std::vector<at::Tensor>
mha_fwd_kvcache(at::Tensor &q,                 // batch_size x seqlen_q x num_heads x head_size
                const at::Tensor &kcache,            // batch_size_c x seqlen_k x num_heads_k x head_size or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
                const at::Tensor &vcache,            // batch_size_c x seqlen_k x num_heads_k x head_size or num_blocks x page_block_size x num_heads_k x head_size if there's a block_table.
                std::optional<const at::Tensor> &k_, // batch_size x seqlen_knew x num_heads_k x head_size
                std::optional<const at::Tensor> &v_, // batch_size x seqlen_knew x num_heads_k x head_size
                std::optional<const at::Tensor> &seqlens_k_, // batch_size
                std::optional<const at::Tensor> &rotary_cos_, // seqlen_ro x (rotary_dim / 2)
                std::optional<const at::Tensor> &rotary_sin_, // seqlen_ro x (rotary_dim / 2)
                std::optional<const at::Tensor> &cache_batch_idx_, // indices to index into the KV cache
                std::optional<const at::Tensor> &leftpad_k_, // batch_size
                std::optional<at::Tensor> &block_table_, // batch_size x max_num_blocks_per_seq
                std::optional<at::Tensor> &alibi_slopes_, // num_heads or batch_size x num_heads
                std::optional<at::Tensor> &out_, // batch_size x seqlen_q x num_heads x head_size
                const float softmax_scale,
                bool is_causal,
                int window_size_left,
                int window_size_right,
                const float softcap,
                bool is_rotary_interleaved, // if true, rotary combines indices 0 & 1, else indices 0 & rotary_dim / 2
                int num_splits
                ) {

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{q.device()};

    auto [cc_major, cc_minor] = get_compute_capability(get_current_device());
    bool is_sm8x_min = cc_major >= 8;
    TORCH_CHECK(is_sm8x_min, "FlashAttention only supports Ampere GPUs or newer.");

    auto q_dtype = q.dtype();
    TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
                "FlashAttention only support fp16 and bf16 data type");
    TORCH_CHECK(kcache.dtype() == q_dtype, "query and key must have the same dtype");
    TORCH_CHECK(vcache.dtype() == q_dtype, "query and value must have the same dtype");

    CHECK_DEVICE(q); CHECK_DEVICE(kcache); CHECK_DEVICE(vcache);

    TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
    TORCH_CHECK(kcache.stride(-1) == 1, "Input tensor must have contiguous last dimension");
    TORCH_CHECK(vcache.stride(-1) == 1, "Input tensor must have contiguous last dimension");

    at::Tensor block_table;
    const bool paged_KV = block_table_.has_value();
    if (paged_KV) {
        TORCH_CHECK(!cache_batch_idx_.has_value(), "Paged KVcache does not support cache_batch_idx");
        block_table = block_table_.value();
        CHECK_DEVICE(block_table);
        TORCH_CHECK(block_table.dtype() == torch::kInt32, "block_table must have dtype torch.int32");
        TORCH_CHECK(block_table.stride(-1) == 1, "block_table must have contiguous last dimension");
    }

    const auto sizes = q.sizes();

    const int batch_size = sizes[0];
    int seqlen_q = sizes[1];
    int num_heads = sizes[2];
    const int head_size_og = sizes[3];

    const int max_num_blocks_per_seq = !paged_KV ? 0 : block_table.size(1);
    const int num_blocks = !paged_KV ? 0 : kcache.size(0);
    const int page_block_size = !paged_KV ? 1 : kcache.size(1);
    TORCH_CHECK(!paged_KV || page_block_size % 256 == 0, "Paged KV cache block size must be divisible by 256");
    // With paging, the logical KV length is whatever the block table can address.
    const int seqlen_k = !paged_KV ? kcache.size(1) : max_num_blocks_per_seq * page_block_size;
    const int num_heads_k = kcache.size(2);
    const int batch_size_c = !paged_KV ? kcache.size(0) : batch_size;
    TORCH_CHECK(batch_size > 0, "batch size must be positive");
    TORCH_CHECK(head_size_og <= 256, "FlashAttention forward only supports head dimension at most 256");
    TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");

    // causal=true is the same as causal=false in this case
    if (seqlen_q == 1 && !alibi_slopes_.has_value()) { is_causal = false; }
    if (is_causal) { window_size_right = 0; }

    // Faster to transpose q from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d) in this case
    // H/t Daniel Haziza
    const int seqlenq_ngroups_swapped = seqlen_q == 1 && num_heads > num_heads_k && window_size_left < 0 && window_size_right < 0 && head_size_og % 8 == 0 && !alibi_slopes_.has_value();
    if (seqlenq_ngroups_swapped) {
        const int ngroups = num_heads / num_heads_k;
        q = q.reshape({batch_size, num_heads_k, ngroups, head_size_og}).transpose(1, 2);
        seqlen_q = ngroups;
        num_heads = num_heads_k;
    }

    // A window covering the whole sequence is equivalent to no window.
    if (window_size_left >= seqlen_k) { window_size_left = -1; }
    if (window_size_right >= seqlen_k) { window_size_right = -1; }

    CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size_og);
    if (!paged_KV) {
        CHECK_SHAPE(kcache, batch_size_c, seqlen_k, num_heads_k, head_size_og);
        CHECK_SHAPE(vcache, batch_size_c, seqlen_k, num_heads_k, head_size_og);
    } else {
        CHECK_SHAPE(kcache, num_blocks, page_block_size, num_heads_k, head_size_og);
        CHECK_SHAPE(vcache, num_blocks, page_block_size, num_heads_k, head_size_og);
        CHECK_SHAPE(block_table, batch_size, max_num_blocks_per_seq);
    }

    // Kernels require head_size to be a multiple of 8; pad the last dim if needed
    // (the padding is sliced off again after the launch).
    at::Tensor q_padded, kcache_padded, vcache_padded;
    if (head_size_og % 8 != 0) {
        q_padded = torch::nn::functional::pad(q, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
        kcache_padded = torch::nn::functional::pad(kcache, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
        vcache_padded = torch::nn::functional::pad(vcache, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
    } else {
        q_padded = q;
        kcache_padded = kcache;
        vcache_padded = vcache;
    }

    at::Tensor out;
    if (out_.has_value()) {
        out = out_.value();
        TORCH_CHECK(out.dtype() == q_dtype, "Output must have the same dtype as inputs");
        CHECK_DEVICE(out);
        TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
        CHECK_SHAPE(out, batch_size, seqlen_q, num_heads, head_size_og);
        // Caller's buffer has the unpadded head dim; compute into a padded temp
        // and copy back into out_ at the end.
        if (head_size_og % 8 != 0) { out = torch::empty_like(q_padded); }
    } else {
        out = torch::empty_like(q_padded);
    }

    auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
    const int head_size = round_multiple(head_size_og, 8);
    const int head_size_rounded = round_multiple(head_size, head_size <= 128 ? 32 : 64);
    const int seqlen_q_rounded = round_multiple(seqlen_q, 128);
    const int seqlen_k_rounded = round_multiple(seqlen_k, 128);

    auto opts = q.options();

    // Per-(batch, head, query) log-sum-exp of the softmax, always fp32.
    auto softmax_lse = torch::empty({batch_size, num_heads, seqlen_q}, opts.dtype(at::kFloat));

    Flash_fwd_params params;
    set_params_fprop(params,
                     batch_size,
                     seqlen_q, seqlen_k,
                     seqlen_q_rounded, seqlen_k_rounded,
                     num_heads, num_heads_k,
                     head_size, head_size_rounded,
                     q_padded, kcache_padded, vcache_padded, out,
                     /*cu_seqlens_q_d=*/nullptr,
                     /*cu_seqlens_k_d=*/nullptr,
                     /*seqused_k=*/nullptr,
                     /*p_ptr=*/nullptr,
                     softmax_lse.data_ptr(),
                     /*p_dropout=*/0.f,
                     softmax_scale,
                     window_size_left,
                     window_size_right,
                     softcap
                     );

    // Optional new K/V to append to the cache in-place during the kernel.
    at::Tensor k, v, k_padded, v_padded;
    if (k_.has_value()) {
        TORCH_CHECK(v_.has_value(), "If key is supplied, value must also be passed in");
        TORCH_CHECK(seqlens_k_.has_value(), "If key is supplied, seqlens_k must also be passed in");
        TORCH_CHECK(seqlen_q <= seqlen_k, "If key is supplied, it must have seqlen <= the seqlen of the KV cache");
        k = k_.value();
        v = v_.value();
        TORCH_CHECK(k.dtype() == q_dtype, "Key must have the same dtype as query");
        TORCH_CHECK(v.dtype() == q_dtype, "Value must have the same dtype as query");
        CHECK_DEVICE(k); CHECK_DEVICE(v);
        TORCH_CHECK(k.stride(-1) == 1, "Key tensor must have contiguous last dimension");
        TORCH_CHECK(v.stride(-1) == 1, "Value tensor must have contiguous last dimension");
        int seqlen_knew = k.size(1);
        CHECK_SHAPE(k, batch_size, seqlen_knew, num_heads_k, head_size_og);
        CHECK_SHAPE(v, batch_size, seqlen_knew, num_heads_k, head_size_og);
        if (head_size_og % 8 != 0) {
            k_padded = torch::nn::functional::pad(k, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
            v_padded = torch::nn::functional::pad(v, torch::nn::functional::PadFuncOptions({0, 8 - head_size_og % 8}));
        } else {
            k_padded = k;
            v_padded = v;
        }
        params.seqlen_knew = seqlen_knew;
        params.knew_ptr = k_padded.data_ptr();
        params.vnew_ptr = v_padded.data_ptr();
        // All stride are in elements, not bytes.
        params.knew_batch_stride = k_padded.stride(0);
        params.vnew_batch_stride = v_padded.stride(0);
        params.knew_row_stride = k_padded.stride(-3);
        params.vnew_row_stride = v_padded.stride(-3);
        params.knew_head_stride = k_padded.stride(-2);
        params.vnew_head_stride = v_padded.stride(-2);
    }

    if (seqlens_k_.has_value()) {
        auto seqlens_k = seqlens_k_.value();
        TORCH_CHECK(seqlens_k.dtype() == torch::kInt32, "seqlens_k must have dtype int32");
        CHECK_DEVICE(seqlens_k);
        CHECK_CONTIGUOUS(seqlens_k);
        CHECK_SHAPE(seqlens_k, batch_size);
        params.cu_seqlens_k = static_cast<int *>(seqlens_k.data_ptr());
    }
    // When seqlens_k is given it holds per-batch lengths, not cumulative offsets.
    params.is_seqlens_k_cumulative = !(seqlens_k_.has_value());
    if (leftpad_k_.has_value()) {
        TORCH_CHECK(!paged_KV, "We don't support Paged KV and leftpad_k running at the same time yet");
        auto leftpad_k = leftpad_k_.value();
        TORCH_CHECK(leftpad_k.dtype() == torch::kInt32, "leftpad_k must have dtype int32");
        CHECK_DEVICE(leftpad_k);
        CHECK_CONTIGUOUS(leftpad_k);
        CHECK_SHAPE(leftpad_k, batch_size);
        params.leftpad_k = static_cast<int *>(leftpad_k.data_ptr());
    }

    if (rotary_cos_.has_value()) {
        TORCH_CHECK(k_.has_value(), "If rotary cos/sin are provided, new key / value to be appended to KV cache must also be provided");
        auto rotary_cos = rotary_cos_.value();
        CHECK_DEVICE(rotary_cos);
        // cos table is (seqlen_ro, rotary_dim / 2), hence the * 2.
        params.rotary_dim = rotary_cos.size(1) * 2;
        TORCH_CHECK(params.rotary_dim <= head_size, "rotary_dim must be <= headdim");
        TORCH_CHECK(params.rotary_dim % 16 == 0, "Only rotary dimensions divisible by 16 are currently supported");
        const int seqlen_ro = rotary_cos.size(0);
        TORCH_CHECK(seqlen_ro >= seqlen_k, "cos/sin seqlen must be at least the seqlen of KV cache");
        CHECK_SHAPE(rotary_cos, seqlen_ro, params.rotary_dim / 2);
        CHECK_CONTIGUOUS(rotary_cos);
        TORCH_CHECK(rotary_cos.scalar_type() == q_dtype, "rotary_cos must have the same dtype as query");

        TORCH_CHECK(rotary_sin_.has_value(), "If rotary cos is provided, rotary sin must also be provided");
        auto rotary_sin = rotary_sin_.value();
        CHECK_DEVICE(rotary_sin);
        CHECK_SHAPE(rotary_sin, seqlen_ro, params.rotary_dim / 2);
        CHECK_CONTIGUOUS(rotary_sin);
        TORCH_CHECK(rotary_sin.scalar_type() == q_dtype, "rotary_cos must have the same dtype as query");
        params.rotary_cos_ptr = rotary_cos.data_ptr();
        params.rotary_sin_ptr = rotary_sin.data_ptr();
        params.is_rotary_interleaved = is_rotary_interleaved;
    } else {
        params.rotary_dim = 0;
    }

    if (cache_batch_idx_.has_value()) {
        auto cache_batch_idx = cache_batch_idx_.value();
        CHECK_DEVICE(cache_batch_idx);
        CHECK_CONTIGUOUS(cache_batch_idx);
        TORCH_CHECK(cache_batch_idx.scalar_type() == torch::kInt32, "cache_batch_idx must have dtype int32");
        params.cache_batch_idx = reinterpret_cast<int *>(cache_batch_idx.data_ptr());
    }

    // Keep references to these tensors to extend their lifetime
    at::Tensor softmax_lse_accum, out_accum;
    std::tie(softmax_lse_accum, out_accum) = set_params_splitkv(
        params, batch_size, num_heads, head_size, seqlen_k, seqlen_q,
        head_size_rounded, /*dropout*/ 0.f, num_splits, get_num_sm(get_current_device()), opts);

    if (paged_KV) {
        params.block_table = block_table.data_ptr<int>();
        params.block_table_batch_stride = block_table.stride(0);
    }
    params.page_block_size = page_block_size;


    set_params_alibi(params, alibi_slopes_, batch_size, num_heads);

    auto stream = at::cuda::getCurrentCUDAStream().stream();
    // Only split kernel supports appending to KV cache, or indexing to the cache with cache_batch_idx,
    // or paged KV cache
    run_mha_fwd(params, stream, /*force_split_kernel=*/k_.has_value() || cache_batch_idx_.has_value() || paged_KV);

    if (head_size_og % 8 != 0) {
        // Slice the head-dim padding back off, and propagate results into the
        // caller-provided output / cache buffers.
        out = out.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
        if (out_.has_value()) { out_.value().copy_(out); }
        if (k_.has_value()) {
            // It's expensive to copy the KV cache here for the case where head size not divisible by 8,
            // but we don't expect to get this case in practice. This is just so that the code works for that case.
            kcache.copy_(kcache_padded.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)}));
            vcache.copy_(vcache_padded.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)}));
        }
    }

    if (seqlenq_ngroups_swapped) {
        // Undo the seqlen_q <-> ngroups swap so outputs match the caller's layout.
        out = out.transpose(1, 2).reshape({batch_size, 1, num_heads_k * seqlen_q, head_size_og});
        softmax_lse = softmax_lse.reshape({batch_size, num_heads_k * seqlen_q, 1});
    }
    return {out, softmax_lse};
}
|
| 1476 |
+
} // namespace FLASH_NAMESPACE
|
| 1477 |
+
|
| 1478 |
+
// Python bindings: register the attention entry points as a Torch extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    namespace fa = FLASH_NAMESPACE;
    m.doc() = "FlashAttention";
    // Fixed-length batched kernels.
    m.def("fwd", &fa::mha_fwd, "Forward pass");
    m.def("bwd", &fa::mha_bwd, "Backward pass");
    // Variable-length (cu_seqlens-packed) kernels.
    m.def("varlen_fwd", &fa::mha_varlen_fwd, "Forward pass (variable length)");
    m.def("varlen_bwd", &fa::mha_varlen_bwd, "Backward pass (variable length)");
    // Decoding path that reads/appends to an existing KV cache.
    m.def("fwd_kvcache", &fa::mha_fwd_kvcache, "Forward pass, with KV-cache");
}
|
Code/Baselines/flash-attention/csrc/ft_attention/README.md
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Attention kernel from FasterTransformer
|
| 2 |
+
|
| 3 |
+
This CUDA extension wraps the single-query attention [kernel](https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention/decoder_masked_multihead_attention_template.hpp) from
|
| 4 |
+
FasterTransformer v5.2.1 for benchmarking purposes.
|
| 5 |
+
|
| 6 |
+
```sh
|
| 7 |
+
cd csrc/ft_attention && pip install .
|
| 8 |
+
```
|
| 9 |
+
|
| 10 |
+
As of 2023-09-17, this extension is no longer used in the FlashAttention repo.
|
| 11 |
+
FlashAttention now has implemented
|
| 12 |
+
[`flash_attn_with_kvcache`](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/flash_attn_interface.py)
|
| 13 |
+
with all the features of this `ft_attention` kernel (and more).
|
| 14 |
+
|
Code/Baselines/flash-attention/csrc/ft_attention/cuda_bf16_fallbacks.cuh
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Downloaded from from FasterTransformer v5.2.1
|
| 2 |
+
// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/utils/cuda_bf16_fallbacks.cuh
|
| 3 |
+
/*
|
| 4 |
+
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
* you may not use this file except in compliance with the License.
|
| 8 |
+
* You may obtain a copy of the License at
|
| 9 |
+
*
|
| 10 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
*
|
| 12 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
* See the License for the specific language governing permissions and
|
| 16 |
+
* limitations under the License.
|
| 17 |
+
*/
|
| 18 |
+
|
| 19 |
+
#pragma once
|
| 20 |
+
|
| 21 |
+
#include "cuda_bf16_wrapper.h"
|
| 22 |
+
#include <cuda_fp16.h>
|
| 23 |
+
|
| 24 |
+
namespace fastertransformer {
|
| 25 |
+
|
| 26 |
+
#ifdef ENABLE_BF16
|
| 27 |
+
// Widen a packed bf16x2 into a float2 (low lane -> .x, high lane -> .y).
inline __device__ float2 bf1622float2(const __nv_bfloat162 val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Pre-sm80: no native bf16x2-to-float2 conversion; widen each lane by hand.
    float2 widened;
    widened.x = __low2float(val);
    widened.y = __high2float(val);
    return widened;
#else
    return __bfloat1622float2(val);
#endif
}
|
| 37 |
+
|
| 38 |
+
// Saturating conversion of a bf16x2 pair into two packed int8 lanes, returned
// as a single int16_t. Each lane is clamped to [-128, 127] before truncation.
inline __device__ int16_t bf1622int16(__nv_bfloat162 val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Pre-sm80: clamp in float space (no packed bf16 min/max available).
    float2 f_val;
    f_val.x = max(min(__low2float(val), 127.f), -128.f);
    f_val.y = max(min(__high2float(val), 127.f), -128.f);
    // Pack the two bytes into one int16 via an anonymous union.
    union { int8_t int8[2]; int16_t int16; };
    int8[0] = static_cast<int8_t>(static_cast<short>(f_val.x));
    int8[1] = static_cast<int8_t>(static_cast<short>(f_val.y));
    return int16;
#else
    // sm80+: clamp both lanes at once with packed bf16 min/max, then truncate.
    val = __hmin2(val, make_bfloat162(127., 127.));
    val = __hmax2(val, make_bfloat162(-128., -128.));
    union { int8_t int8[2]; int16_t int16; };
    int8[0] = static_cast<int8_t>(static_cast<short>(val.x));
    int8[1] = static_cast<int8_t>(static_cast<short>(val.y));
    return int16;
#endif
}
|
| 56 |
+
|
| 57 |
+
// Round a float2 into a packed bf16x2 (round-to-nearest).
inline __device__ __nv_bfloat162 float22bf162(const float2 val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Pre-sm80: convert the two scalars separately.
    return __floats2bfloat162_rn(val.x, val.y);
#else
    return __float22bfloat162_rn(val);
#endif
}
|
| 64 |
+
|
| 65 |
+
// Broadcast one bf16 scalar into both lanes of a bf16x2.
inline __device__ __nv_bfloat162 bf162bf162(const __nv_bfloat16 val) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Pre-sm80: duplicate the scalar into both lanes by hand.
    __nv_bfloat162 pair;
    pair.x = val;
    pair.y = val;
    return pair;
#else
    return __bfloat162bfloat162(val);
#endif
}
|
| 75 |
+
|
| 76 |
+
// Lanewise add of two bf16x2 pairs. Pre-sm80 has no bf16 arithmetic, so each
// lane is widened to float, added, and rounded back to bf16.
inline __device__ __nv_bfloat162 bf16hadd2(const __nv_bfloat162 x, const __nv_bfloat162 y) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    float fxl, fxh, fyl, fyh;
    fxl = __low2float(x);
    fxh = __high2float(x);
    fyl = __low2float(y);
    fyh = __high2float(y);
    return __floats2bfloat162_rn(fxl + fyl, fxh + fyh);
#else
    // sm80+: native packed bf16 add.
    return __hadd2(x, y);
#endif
}
|
| 88 |
+
|
| 89 |
+
// Scalar bf16 add; pre-sm80 falls back to float arithmetic with a final round.
inline __device__ __nv_bfloat16 bf16hadd(const __nv_bfloat16 x, const __nv_bfloat16 y) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    return __float2bfloat16( __bfloat162float(x) + __bfloat162float(y) );
#else
    return __hadd(x, y);
#endif
}
|
| 96 |
+
|
| 97 |
+
// Lanewise subtract (x - y) of two bf16x2 pairs; pre-sm80 goes through float.
inline __device__ __nv_bfloat162 bf16hsub2(const __nv_bfloat162 x, const __nv_bfloat162 y) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    float fxl, fxh, fyl, fyh;
    fxl = __low2float(x);
    fxh = __high2float(x);
    fyl = __low2float(y);
    fyh = __high2float(y);
    return __floats2bfloat162_rn(fxl - fyl, fxh - fyh);
#else
    // sm80+: native packed bf16 subtract.
    return __hsub2(x, y);
#endif
}
|
| 109 |
+
|
| 110 |
+
// Scalar bf16 subtract (x - y); pre-sm80 falls back to float arithmetic.
inline __device__ __nv_bfloat16 bf16hsub(const __nv_bfloat16 x, const __nv_bfloat16 y) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    return __float2bfloat16( __bfloat162float(x) - __bfloat162float(y) );
#else
    return __hsub(x, y);
#endif
}
|
| 117 |
+
|
| 118 |
+
// Lanewise multiply of two bf16x2 pairs; pre-sm80 goes through float.
inline __device__ __nv_bfloat162 bf16hmul2(const __nv_bfloat162 x, const __nv_bfloat162 y) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    float fxl, fxh, fyl, fyh;
    fxl = __low2float(x);
    fxh = __high2float(x);
    fyl = __low2float(y);
    fyh = __high2float(y);
    return __floats2bfloat162_rn(fxl * fyl, fxh * fyh);
#else
    // sm80+: native packed bf16 multiply.
    return __hmul2(x, y);
#endif
}
|
| 130 |
+
|
| 131 |
+
// Scalar bf16 multiply; pre-sm80 falls back to float arithmetic.
inline __device__ __nv_bfloat16 bf16hmul(const __nv_bfloat16 x, const __nv_bfloat16 y) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    return __float2bfloat16( __bfloat162float(x) * __bfloat162float(y) );
#else
    return __hmul(x, y);
#endif
}
|
| 138 |
+
|
| 139 |
+
// Elementwise bf16x2 fused multiply-add (x * y + z) with a pre-Ampere fallback.
inline __device__ __nv_bfloat162 bf16hfma2(const __nv_bfloat162 x, const __nv_bfloat162 y, const __nv_bfloat162 z) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // fp32 multiply-add on each half, then a single rounding back to bf16x2.
    const float lo = __low2float(x) * __low2float(y) + __low2float(z);
    const float hi = __high2float(x) * __high2float(y) + __high2float(z);
    return __floats2bfloat162_rn(lo, hi);
#else
    return __hfma2(x, y, z);
#endif
}
|
| 153 |
+
|
| 154 |
+
// bf16 scalar fused multiply-add (x * y + z) with a pre-Ampere fp32 fallback.
inline __device__ __nv_bfloat16 bf16hfma(const __nv_bfloat16 x, const __nv_bfloat16 y, const __nv_bfloat16 z) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Compute in fp32 with one rounding at the end.
    const float fma = __bfloat162float(x) * __bfloat162float(y) + __bfloat162float(z);
    return __float2bfloat16(fma);
#else
    return __hfma(x, y, z);
#endif
}
|
| 161 |
+
|
| 162 |
+
// Elementwise natural exponential of a bf16 pair. NOTE: the "2" in the name
// refers to the 2-wide vector type, not base-2 (both branches compute e^x:
// expf on the fallback path, h2exp on the native path).
inline __device__ __nv_bfloat162 bf16exp2(const __nv_bfloat162 x) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Pre-Ampere fallback: widen both halves to fp32, exponentiate, round back.
    float fxl, fxh;
    fxl = __low2float(x);
    fxh = __high2float(x);  // fixed: stray ';;' (empty statement) removed
    return __floats2bfloat162_rn(expf(fxl), expf(fxh));
#else
    return h2exp(x);
#endif
}
|
| 172 |
+
|
| 173 |
+
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)
// Pre-Ampere: CUDA headers do not provide __nv_bfloat162 arithmetic operators
// or make_bfloat162, so supply minimal replacements built on the fp32
// fallbacks above. (Fixed: removed extraneous ';' after the function bodies —
// stray empty declarations, ill-formed before C++11 and flagged by -pedantic.)
inline __device__ __nv_bfloat162 operator*(const __nv_bfloat162 x, const __nv_bfloat162 y) { return bf16hmul2(x, y); }
inline __device__ __nv_bfloat162 operator+(const __nv_bfloat162 x, const __nv_bfloat162 y) { return bf16hadd2(x, y); }

// Pack two bf16 scalars into the low/high halves of a bf16x2.
inline __device__ __nv_bfloat162 make_bfloat162(const __nv_bfloat16 x, const __nv_bfloat16 y)
{
    __nv_bfloat162 t; t.x = x; t.y = y; return t;
}

#endif
|
| 183 |
+
|
| 184 |
+
// Three-operand bf16 sum (a + b + c) with a pre-Ampere fp32 fallback.
inline __device__ __nv_bfloat16 bf16hadd(__nv_bfloat16 a, __nv_bfloat16 b, __nv_bfloat16 c) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Accumulate in fp32 so only one bf16 rounding happens at the end.
    const float acc = __bfloat162float(a) + __bfloat162float(b) + __bfloat162float(c);
    return __float2bfloat16(acc);
#else
    return a + b + c;
#endif
}
|
| 191 |
+
|
| 192 |
+
// Four-operand bf16 sum (a + b + c + d) with a pre-Ampere fp32 fallback.
inline __device__ __nv_bfloat16 bf16hadd(__nv_bfloat16 a, __nv_bfloat16 b, __nv_bfloat16 c, __nv_bfloat16 d) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    const float acc = __bfloat162float(a) + __bfloat162float(b) + __bfloat162float(c) + __bfloat162float(d);
    return __float2bfloat16(acc);
#else
    // NOTE: unlike the 3-operand overload, this path also accumulates in fp32
    // (via casts) before a single rounding back to bf16.
    return (__nv_bfloat16)((float)a + (float)b + (float)c + (float)d);
#endif
}
|
| 199 |
+
|
| 200 |
+
// Three-operand elementwise bf16x2 sum (a + b + c) with a pre-Ampere fallback.
inline __device__ __nv_bfloat162 bf16hadd2(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // fp32 accumulation per half, one rounding when repacking.
    const float lo = __low2float(a) + __low2float(b) + __low2float(c);
    const float hi = __high2float(a) + __high2float(b) + __high2float(c);
    return __floats2bfloat162_rn(lo, hi);
#else
    return a + b + c;
#endif
}
|
| 214 |
+
|
| 215 |
+
// Three-operand bf16 product (a * b * c) with a pre-Ampere fp32 fallback.
inline __device__ __nv_bfloat16 bf16hmul(__nv_bfloat16 a, __nv_bfloat16 b, __nv_bfloat16 c) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // Multiply in fp32 so only one bf16 rounding happens at the end.
    const float prod = __bfloat162float(a) * __bfloat162float(b) * __bfloat162float(c);
    return __float2bfloat16(prod);
#else
    return a * b * c;
#endif
}
|
| 222 |
+
|
| 223 |
+
// Three-operand elementwise bf16x2 product (a * b * c) with a pre-Ampere fallback.
inline __device__ __nv_bfloat162 bf16hmul2(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // fp32 product per half, one rounding when repacking.
    const float lo = __low2float(a) * __low2float(b) * __low2float(c);
    const float hi = __high2float(a) * __high2float(b) * __high2float(c);
    return __floats2bfloat162_rn(lo, hi);
#else
    return a * b * c;
#endif
}
|
| 237 |
+
|
| 238 |
+
// Elementwise bf16x2 a * b * c + d with a pre-Ampere fp32 fallback.
inline __device__ __nv_bfloat162 bf16hfma2(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c, __nv_bfloat162 d) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
    // fp32 multiply-multiply-add per half, one rounding when repacking.
    const float lo = __low2float(a) * __low2float(b) * __low2float(c) + __low2float(d);
    const float hi = __high2float(a) * __high2float(b) * __high2float(c) + __high2float(d);
    return __floats2bfloat162_rn(lo, hi);
#else
    return a * b * c + d;
#endif
}
|
| 254 |
+
|
| 255 |
+
#endif // ENABLE_BF16
|
| 256 |
+
|
| 257 |
+
} // namespace fastertransformer
|
Code/Baselines/flash-attention/csrc/ft_attention/cuda_bf16_wrapper.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Downloaded from from FasterTransformer v5.2.1
|
| 2 |
+
// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/utils/cuda_bf16_wrapper.h
|
| 3 |
+
/*
|
| 4 |
+
* Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
* you may not use this file except in compliance with the License.
|
| 8 |
+
* You may obtain a copy of the License at
|
| 9 |
+
*
|
| 10 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
*
|
| 12 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
* See the License for the specific language governing permissions and
|
| 16 |
+
* limitations under the License.
|
| 17 |
+
*/
|
| 18 |
+
|
| 19 |
+
#pragma once
|
| 20 |
+
|
| 21 |
+
#ifdef ENABLE_BF16
|
| 22 |
+
#include <cuda_bf16.h>
|
| 23 |
+
#endif
|
Code/Baselines/flash-attention/csrc/ft_attention/decoder_masked_multihead_attention.h
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Downloaded from from FasterTransformer v5.2.1
|
| 2 |
+
// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention.h
|
| 3 |
+
/*
|
| 4 |
+
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
* you may not use this file except in compliance with the License.
|
| 8 |
+
* You may obtain a copy of the License at
|
| 9 |
+
*
|
| 10 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
*
|
| 12 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
* See the License for the specific language governing permissions and
|
| 16 |
+
* limitations under the License.
|
| 17 |
+
*/
|
| 18 |
+
|
| 19 |
+
#pragma once
|
| 20 |
+
|
| 21 |
+
#include "cuda_bf16_wrapper.h"
|
| 22 |
+
#include <cuda_fp16.h>
|
| 23 |
+
#include <cuda_runtime_api.h>
|
| 24 |
+
#include <stdint.h>
|
| 25 |
+
#include <stdio.h>
|
| 26 |
+
#include <stdlib.h>
|
| 27 |
+
|
| 28 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 29 |
+
|
| 30 |
+
// Abort the whole process with a diagnostic if a CUDA runtime call fails.
// NOTE: exit(1) is a hard failure — callers cannot recover or clean up.
#define CHECK_CUDA(call) \
    do { \
        cudaError_t status_ = call; \
        if (status_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error (%s:%d): %s\n", __FILE__, __LINE__, cudaGetErrorString(status_)); \
            exit(1); \
        } \
    } while (0)
|
| 38 |
+
|
| 39 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 40 |
+
|
| 41 |
+
// The structure of parameters for the masked multihead attention kernel.
|
| 42 |
+
//
|
| 43 |
+
// We use the following terminology to describe the different dimensions.
|
| 44 |
+
//
|
| 45 |
+
// B: Batch size (number of sequences),
|
| 46 |
+
// L: Sequence length,
|
| 47 |
+
// D: Hidden dimension,
|
| 48 |
+
// H: Number of heads,
|
| 49 |
+
// Dh: Hidden dimension per head - Dh = D / H.
|
| 50 |
+
|
| 51 |
+
// Common parameter block shared by masked (self) and cross attention kernels.
// Terminology: B = batch, L = sequence length, D = hidden dim, H = heads,
// Dh = D / H (see file header comment).
template<typename T>
struct Multihead_attention_params_base {

    // The output buffer. Dimensions B x D.
    T* out = nullptr;

    // The input Qs and the associated bias. Dimensions B x D and D, resp.
    const T *q = nullptr, *q_bias = nullptr;
    // The input Ks and the associated bias. Dimensions B x D and D, resp.
    const T *k = nullptr, *k_bias = nullptr;
    // The input Vs and the associated bias. Dimensions B x D and D, resp.
    const T *v = nullptr, *v_bias = nullptr;

    // The cache for the Ks. The size must be at least B x L x D.
    T* k_cache = nullptr;
    // The cache for the Vs. The size must be at least B x L x D.
    T* v_cache = nullptr;
    // The indirections to use for cache when beam sampling.
    const int* cache_indir = nullptr;

    // Strides to handle the case when Q/K/V live in one fused QKV buffer.
    int stride_q = 0;
    int stride_k = 0;
    int stride_v = 0;

    // The batch size (B).
    int batch_size = 0;
    // The beam width for beam search decoding.
    int beam_width = 0;
    // The (maximum) sequence length of the KV cache.
    int memory_max_len = 0;
    // The number of query heads (H).
    int num_heads = 0;
    // Number of key/value heads — presumably < num_heads under MQA/GQA;
    // TODO(review) confirm num_heads is a multiple of this in the kernels.
    int num_heads_kv = 0;
    // Presumably num_heads / num_heads_kv — name-based assumption; verify at call sites.
    int num_heads_q_kv_ratio = 0;
    // The hidden dimension per head (Dh).
    int hidden_size_per_head = 0;
    // The per-head latent space reserved for rotary embeddings.
    int rotary_embedding_dim = 0;
    // Use GPT-NeoX style rotary (vs GPT-J interleaved) — assumption from name; confirm in kernel.
    bool neox_rotary_style = false;
    // Base (theta) of the rotary frequency schedule.
    float rotary_base = 0.0f;
    // The maximum length of input sentences.
    int max_input_length = 0;
    // The current timestep. TODO(bhsueh) Check that do we only this param in cross attention?
    int timestep = 0;
    // The current timestep of each sentences (support different timestep for different sentences)

    // The 1.f / sqrt(Dh). Computed on the host.
    float inv_sqrt_dh = 0.0f;

    // Used when we have some input context like gpt.
    const int* total_padding_tokens = nullptr;

    // Per-token mask / prefix-prompt bookkeeping (nullptr when unused).
    const bool* masked_tokens = nullptr;
    const int* prefix_prompt_lengths = nullptr;
    int max_prefix_prompt_length = 0;

    // Relative position bias table and its per-head stride (nullptr when unused).
    const T* relative_attention_bias = nullptr;
    int relative_attention_bias_stride = 0;
    // The slope per head of linear position bias to attention score (H) — ALiBi-style.
    const T* linear_bias_slopes = nullptr;

    // IA3 adapter weights and task ids (nullptr when unused — TODO confirm).
    const T* ia3_key_weights = nullptr;
    const T* ia3_value_weights = nullptr;
    const int* ia3_tasks = nullptr;

    // int8 quantization scales — presumably consulted only when int8_mode != 0; confirm in kernel.
    const float* qkv_scale_out = nullptr;
    const float* attention_out_scale = nullptr;
    int int8_mode = 0;

    // Precomputed rotary cos/sin tables — presumably indexed by timestep; TODO confirm.
    const T *rotary_cos = nullptr;
    const T *rotary_sin = nullptr;

    // Sparse head selection (nullptr / 0 when all heads are used — TODO confirm).
    const int *nnz_head_idx = nullptr;
    int nnz_heads = 0;
};
|
| 127 |
+
|
| 128 |
+
// Primary template: parameters for self (masked) attention, CROSS_ATTENTION = false.
template<typename T, bool CROSS_ATTENTION>
struct Multihead_attention_params: public Multihead_attention_params_base<T> {
    // Output buffer for cross-attention scores.
    float* cross_attention_out = nullptr;
    int max_decoder_seq_len = 0;
    bool is_return_cross_attentions = false;

    // Allows to exit attention early (per-sequence finished flags).
    bool* finished = nullptr;

    // Required in case of cross attention.
    // Will need it here till if constexpr in c++17.
    int* memory_length_per_sample = nullptr;

    // Required in case of masked attention with different length per sequence.
    const int* length_per_sample = nullptr;
};
|
| 145 |
+
|
| 146 |
+
// Specialization for cross attention (CROSS_ATTENTION = true); mirrors the
// primary template's extra fields.
template<typename T>
struct Multihead_attention_params<T, true>: public Multihead_attention_params_base<T> {
    // Output buffer for cross-attention scores.
    float* cross_attention_out = nullptr;
    int max_decoder_seq_len = 0;
    bool is_return_cross_attentions = false;

    // Allows to exit attention early (per-sequence finished flags).
    bool* finished = nullptr;

    // Required in case of cross attention.
    int* memory_length_per_sample = nullptr;

    // Required in case of masked attention with different length per sequence.
    const int* length_per_sample = nullptr;
};
|
| 162 |
+
|
| 163 |
+
// Convenience aliases: masked self-attention vs cross-attention parameter sets.
template<class T>
using Masked_multihead_attention_params = Multihead_attention_params<T, false>;

template<class T>
using Cross_multihead_attention_params = Multihead_attention_params<T, true>;
|
| 168 |
+
|
| 169 |
+
// Parameters controlling the optional return of cross-attention scores.
template<typename T>
struct outputCrossAttentionParam {
    // Max decoder output length.
    int max_decoder_seq_len = 0;
    T* cross_attention_out = nullptr;
    bool is_return_cross_attentions = false;
};
|
| 176 |
+
|
| 177 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 178 |
+
|
| 179 |
+
void masked_multihead_attention(const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream);
|
| 180 |
+
void masked_multihead_attention(const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
|
| 181 |
+
#ifdef ENABLE_BF16
|
| 182 |
+
void masked_multihead_attention(const Masked_multihead_attention_params<__nv_bfloat16>& params,
|
| 183 |
+
const cudaStream_t& stream);
|
| 184 |
+
#endif
|
| 185 |
+
void cross_multihead_attention(const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream);
|
| 186 |
+
void cross_multihead_attention(const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream);
|
| 187 |
+
#ifdef ENABLE_BF16
|
| 188 |
+
void cross_multihead_attention(const Cross_multihead_attention_params<__nv_bfloat16>& params,
|
| 189 |
+
const cudaStream_t& stream);
|
| 190 |
+
#endif
|
| 191 |
+
|
| 192 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
Code/Baselines/flash-attention/csrc/ft_attention/decoder_masked_multihead_attention_utils.h
ADDED
|
@@ -0,0 +1,2017 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Downloaded from from FasterTransformer v5.2.1
|
| 2 |
+
// https://github.com/NVIDIA/FasterTransformer/blob/release/v5.2.1_tag/src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h
|
| 3 |
+
/*
|
| 4 |
+
* Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
|
| 5 |
+
*
|
| 6 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
* you may not use this file except in compliance with the License.
|
| 8 |
+
* You may obtain a copy of the License at
|
| 9 |
+
*
|
| 10 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
*
|
| 12 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
* See the License for the specific language governing permissions and
|
| 16 |
+
* limitations under the License.
|
| 17 |
+
*/
|
| 18 |
+
|
| 19 |
+
#pragma once
|
| 20 |
+
|
| 21 |
+
#include "cuda_bf16_wrapper.h"
|
| 22 |
+
#include "cuda_bf16_fallbacks.cuh"
|
| 23 |
+
#include <stdint.h>
|
| 24 |
+
|
| 25 |
+
using namespace fastertransformer;
|
| 26 |
+
|
| 27 |
+
namespace mmha {
|
| 28 |
+
|
| 29 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 30 |
+
|
| 31 |
+
// Eight fp32 values packed as four float2 halves (wide vector/accumulator type).
struct Float8_ {
    float2 x;
    float2 y;
    float2 z;
    float2 w;
};
|
| 37 |
+
|
| 38 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 39 |
+
|
| 40 |
+
// Four fp32 values packed as two float2 halves (distinct from CUDA's float4).
struct Float4_ {
    float2 x;
    float2 y;
};
|
| 44 |
+
|
| 45 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 46 |
+
|
| 47 |
+
#ifdef ENABLE_BF16
// Four bf16 values packed as two __nv_bfloat162 halves.
struct bf16_4_t {
    __nv_bfloat162 x;
    __nv_bfloat162 y;
};

////////////////////////////////////////////////////////////////////////////////////////////////////

// Eight bf16 values packed as four __nv_bfloat162 halves.
struct bf16_8_t {
    __nv_bfloat162 x;
    __nv_bfloat162 y;
    __nv_bfloat162 z;
    __nv_bfloat162 w;
};
#endif
|
| 62 |
+
|
| 63 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 64 |
+
|
| 65 |
+
// num_elems<T>::value — number of scalar elements packed in vector type T.
// The primary template is intentionally undefined: using an unsupported type
// is a compile error.
template<typename T>
struct num_elems;
template<>
struct num_elems<float> {
    static constexpr int value = 1;
};
template<>
struct num_elems<float2> {
    static constexpr int value = 2;
};
template<>
struct num_elems<float4> {
    static constexpr int value = 4;
};
template<>
struct num_elems<Float4_> {
    static constexpr int value = 4;
};
template<>
struct num_elems<Float8_> {
    static constexpr int value = 8;
};

// Unsigned integer types — presumably raw storage for packed fp16 pairs
// (uint32_t = 2 halves, uint2 = 4, uint4 = 8); confirm against the fp16 kernels.
template<>
struct num_elems<uint32_t> {
    static constexpr int value = 2;
};
template<>
struct num_elems<uint2> {
    static constexpr int value = 4;
};
template<>
struct num_elems<uint4> {
    static constexpr int value = 8;
};

#ifdef ENABLE_BF16
template<>
struct num_elems<__nv_bfloat162> {
    static constexpr int value = 2;
};
template<>
struct num_elems<bf16_4_t> {
    static constexpr int value = 4;
};
template<>
struct num_elems<bf16_8_t> {
    static constexpr int value = 8;
};
#endif
|
| 115 |
+
|
| 116 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 117 |
+
|
| 118 |
+
// Compile-time trait: the packed vector type holding N scalars of type T.
// N == 1 is the identity mapping for any T.
template<typename T, int N>
struct packed_type;
template<typename T>
struct packed_type<T, 1> {
    using type = T;
};

// int8 packs into plain integer registers.
template<>
struct packed_type<int8_t, 2> {
    using type = int16_t;
};
template<>
struct packed_type<int8_t, 4> {
    using type = int32_t;
};
template<>
struct packed_type<int8_t, 8> {
    using type = int64_t;
};

// fp32 packs into CUDA vector types (and Float8_ for the 8-wide case).
template<>
struct packed_type<float, 2> {
    using type = float2;
};
template<>
struct packed_type<float, 4> {
    using type = float4;
};
template<>
struct packed_type<float, 8> {
    using type = Float8_;
};
|
| 149 |
+
|
| 150 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 151 |
+
|
| 152 |
+
// Elementwise fp32 addition, scalar form.
inline __device__ float add(float a, float b) {
    return a + b;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Elementwise fp32 addition, 2-wide form.
inline __device__ float2 add(float2 a, float2 b) {
    float2 out;
    out.x = add(a.x, b.x);
    out.y = add(a.y, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Elementwise fp32 addition, 4-wide form.
inline __device__ float4 add(float4 a, float4 b) {
    float4 out;
    out.x = add(a.x, b.x);
    out.y = add(a.y, b.y);
    out.z = add(a.z, b.z);
    out.w = add(a.w, b.w);
    return out;
}
|
| 178 |
+
|
| 179 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 180 |
+
|
| 181 |
+
#ifdef ENABLE_BF16
// bf16 scalar addition (native bf16 operator+).
inline __device__ __nv_bfloat16 add(__nv_bfloat16 a, __nv_bfloat16 b) {
    return a + b;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// bf16x2 addition via the project helper bf16hadd2.
inline __device__ __nv_bfloat162 add(__nv_bfloat162 a, __nv_bfloat162 b) {
    return bf16hadd2(a, b);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide bf16 addition: two bf16x2 adds.
inline __device__ bf16_4_t add(bf16_4_t a, bf16_4_t b) {
    bf16_4_t out;
    out.x = add(a.x, b.x);
    out.y = add(a.y, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide bf16 addition: four bf16x2 adds.
inline __device__ bf16_8_t add(bf16_8_t a, bf16_8_t b) {
    bf16_8_t out;
    out.x = add(a.x, b.x);
    out.y = add(a.y, b.y);
    out.z = add(a.z, b.z);
    out.w = add(a.w, b.w);
    return out;
}
#endif // ENABLE_BF16
|
| 216 |
+
|
| 217 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 218 |
+
|
| 219 |
+
// fp16 scalar addition; the half lives in a raw uint16_t register, so use PTX add.f16.
inline __device__ uint16_t add(uint16_t a, uint16_t b) {
    uint16_t out;
    asm volatile("add.f16 %0, %1, %2;\n" : "=h"(out) : "h"(a), "h"(b));
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Packed half2 addition (two fp16 values per uint32_t) via PTX add.f16x2.
inline __device__ uint32_t add(uint32_t a, uint32_t b) {
    uint32_t out;
    asm volatile("add.f16x2 %0, %1, %2;\n" : "=r"(out) : "r"(a), "r"(b));
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 addition: two half2 adds.
inline __device__ uint2 add(uint2 a, uint2 b) {
    uint2 out;
    out.x = add(a.x, b.x);
    out.y = add(a.y, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 addition: four half2 adds.
inline __device__ uint4 add(uint4 a, uint4 b) {
    uint4 out;
    out.x = add(a.x, b.x);
    out.y = add(a.y, b.y);
    out.z = add(a.z, b.z);
    out.w = add(a.w, b.w);
    return out;
}
|
| 256 |
+
|
| 257 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 258 |
+
|
| 259 |
+
// Convert one fp32 value to fp16 (round-to-nearest), returned as raw bits.
inline __device__ uint16_t float_to_half(float f) {
    union {
        uint32_t u32;
        uint16_t u16[2];
    } bits;
#if 0 && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 // Is it better?
    // Disabled alternative: one f16x2 convert with a zero high lane.
    float zero = 0.f;
    asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(bits.u32) : "f"(zero), "f"(f));
#else
    asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(bits.u16[0]) : "f"(f));
#endif
    return bits.u16[0];
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Convert a float2 to a packed half2 (f.x in the low lane, f.y in the high lane).
inline __device__ uint32_t float2_to_half2(float2 f) {
    union {
        uint32_t u32;
        uint16_t u16[2];
    } bits;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
    // Single-instruction packed convert on SM80+.
    asm volatile("cvt.rn.f16x2.f32 %0, %1, %2;\n" : "=r"(bits.u32) : "f"(f.y), "f"(f.x));
#else
    asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(bits.u16[0]) : "f"(f.x));
    asm volatile("cvt.rn.f16.f32 %0, %1;\n" : "=h"(bits.u16[1]) : "f"(f.y));
#endif
    return bits.u32;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Widen one fp16 value (raw bits) to fp32.
inline __device__ float half_to_float(uint16_t h) {
    float out;
    asm volatile("cvt.f32.f16 %0, %1;\n" : "=f"(out) : "h"(h));
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Widen a packed half2 to a float2 (low lane -> .x, high lane -> .y).
inline __device__ float2 half2_to_float2(uint32_t v) {
    uint16_t lo, hi;
    asm volatile("mov.b32 {%0, %1}, %2;\n" : "=h"(lo), "=h"(hi) : "r"(v));
    return make_float2(half_to_float(lo), half_to_float(hi));
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Mixed-precision add: fp32 + fp16 (raw bits), accumulated in fp32.
inline __device__ float add(float a, uint16_t b) {
    return a + half_to_float(b);
}
|
| 315 |
+
|
| 316 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 317 |
+
|
| 318 |
+
#ifdef ENABLE_BF16
// Mixed-precision add: fp32 + bf16, accumulated in fp32.
inline __device__ float add(float a, __nv_bfloat16 b) {
    return a + __bfloat162float(b);
}
#endif
|
| 324 |
+
|
| 325 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 326 |
+
|
| 327 |
+
// Mixed-precision add: packed half2 + float2, accumulated in fp32.
inline __device__ float2 add(uint32_t a, float2 fb) {
    float2 fa = half2_to_float2(a);
    return add(fa, fb);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Mixed-precision add: 4 halves + Float4_, accumulated in fp32.
inline __device__ Float4_ add(uint2 a, Float4_ fb) {
    Float4_ out;
    out.x = add(a.x, fb.x);
    out.y = add(a.y, fb.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Mixed-precision add: 8 halves + Float8_, accumulated in fp32.
inline __device__ Float8_ add(uint4 a, Float8_ fb) {
    Float8_ out;
    out.x = add(a.x, fb.x);
    out.y = add(a.y, fb.y);
    out.z = add(a.z, fb.z);
    out.w = add(a.w, fb.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Broadcast one fp16 value into both lanes of a packed half2 register.
inline __device__ uint32_t h0_h0(uint16_t a) {
    uint32_t packed;
    asm volatile("mov.b32 %0, {%1, %1};" : "=r"(packed) : "h"(a));
    return packed;
}
|
| 363 |
+
|
| 364 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 365 |
+
|
| 366 |
+
// fp32 fused-multiply-add family: d = a * b + c, scalar form.
inline __device__ float fma(float a, float b, float c) {
    return a * b + c;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Elementwise fma over float2.
inline __device__ float2 fma(float2 a, float2 b, float2 c) {
    float2 out;
    out.x = fma(a.x, b.x, c.x);
    out.y = fma(a.y, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fma with a scalar multiplier broadcast over a float2.
inline __device__ float2 fma(float a, float2 b, float2 c) {
    float2 out;
    out.x = fma(a, b.x, c.x);
    out.y = fma(a, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Elementwise fma over float4.
inline __device__ float4 fma(float4 a, float4 b, float4 c) {
    float4 out;
    out.x = fma(a.x, b.x, c.x);
    out.y = fma(a.y, b.y, c.y);
    out.z = fma(a.z, b.z, c.z);
    out.w = fma(a.w, b.w, c.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fma with a scalar multiplier broadcast over a float4.
inline __device__ float4 fma(float a, float4 b, float4 c) {
    float4 out;
    out.x = fma(a, b.x, c.x);
    out.y = fma(a, b.y, c.y);
    out.z = fma(a, b.z, c.z);
    out.w = fma(a, b.w, c.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fma with a scalar multiplier broadcast over a Float4_.
inline __device__ Float4_ fma(float a, Float4_ b, Float4_ c) {
    Float4_ out;
    out.x = fma(a, b.x, c.x);
    out.y = fma(a, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fma with a scalar multiplier broadcast over a Float8_.
inline __device__ Float8_ fma(float a, Float8_ b, Float8_ c) {
    Float8_ out;
    out.x = fma(a, b.x, c.x);
    out.y = fma(a, b.y, c.y);
    out.z = fma(a, b.z, c.z);
    out.w = fma(a, b.w, c.w);
    return out;
}
|
| 436 |
+
|
| 437 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 438 |
+
|
| 439 |
+
#ifdef ENABLE_BF16
// Mixed-precision add: packed bf16x2 + float2, accumulated in fp32.
inline __device__ float2 add(__nv_bfloat162 a, float2 fb) {
    float2 fa = bf1622float2(a);
    return add(fa, fb);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Mixed-precision add: 4 bf16 values + Float4_, accumulated in fp32.
inline __device__ Float4_ add(bf16_4_t a, Float4_ fb) {
    Float4_ out;
    out.x = add(a.x, fb.x);
    out.y = add(a.y, fb.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Mixed-precision add: 8 bf16 values + Float8_, accumulated in fp32.
inline __device__ Float8_ add(bf16_8_t a, Float8_ fb) {
    Float8_ out;
    out.x = add(a.x, fb.x);
    out.y = add(a.y, fb.y);
    out.z = add(a.z, fb.z);
    out.w = add(a.w, fb.w);
    return out;
}
#endif // ENABLE_BF16
|
| 468 |
+
|
| 469 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 470 |
+
|
| 471 |
+
// Packed half2 fma via PTX fma.rn.f16x2 (operands are raw fp16x2 bits).
inline __device__ uint32_t fma(uint32_t a, uint32_t b, uint32_t c) {
    uint32_t out;
    asm volatile("fma.rn.f16x2 %0, %1, %2, %3;\n" : "=r"(out) : "r"(a), "r"(b), "r"(c));
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Scalar fp16 multiplier broadcast into both lanes, then half2 fma.
inline __device__ uint32_t fma(uint16_t a, uint32_t b, uint32_t c) {
    return fma(h0_h0(a), b, c);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 fma: two half2 fmas.
inline __device__ uint2 fma(uint2 a, uint2 b, uint2 c) {
    uint2 out;
    out.x = fma(a.x, b.x, c.x);
    out.y = fma(a.y, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 fma with a broadcast scalar multiplier.
inline __device__ uint2 fma(uint16_t a, uint2 b, uint2 c) {
    uint32_t s = h0_h0(a);
    uint2 out;
    out.x = fma(s, b.x, c.x);
    out.y = fma(s, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 fma: four half2 fmas.
inline __device__ uint4 fma(uint4 a, uint4 b, uint4 c) {
    uint4 out;
    out.x = fma(a.x, b.x, c.x);
    out.y = fma(a.y, b.y, c.y);
    out.z = fma(a.z, b.z, c.z);
    out.w = fma(a.w, b.w, c.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 fma with a broadcast scalar multiplier.
inline __device__ uint4 fma(uint16_t a, uint4 b, uint4 c) {
    uint32_t s = h0_h0(a);
    uint4 out;
    out.x = fma(s, b.x, c.x);
    out.y = fma(s, b.y, c.y);
    out.z = fma(s, b.z, c.z);
    out.w = fma(s, b.w, c.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fp16 * fp16 + fp32: widen both halves and accumulate in fp32.
inline __device__ float fma(uint16_t a, uint16_t b, float fc) {
    float fa = half_to_float(a);
    float fb = half_to_float(b);
    return fa * fb + fc;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// half2 * half2 + float2: widen and accumulate in fp32.
inline __device__ float2 fma(uint32_t a, uint32_t b, float2 fc) {
    float2 fa = half2_to_float2(a);
    float2 fb = half2_to_float2(b);
    return fma(fa, fb, fc);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Broadcast-scalar variant of the half2 * half2 + float2 fma.
inline __device__ float2 fma(uint16_t a, uint32_t b, float2 fc) {
    return fma(h0_h0(a), b, fc);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 fma with fp32 accumulation.
inline __device__ Float4_ fma(uint2 a, uint2 b, Float4_ fc) {
    Float4_ out;
    out.x = fma(a.x, b.x, fc.x);
    out.y = fma(a.y, b.y, fc.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 fma with fp32 accumulation and a broadcast scalar multiplier.
inline __device__ Float4_ fma(uint16_t a, uint2 b, Float4_ fc) {
    uint32_t s = h0_h0(a);
    Float4_ out;
    out.x = fma(s, b.x, fc.x);
    out.y = fma(s, b.y, fc.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 fma with fp32 accumulation.
inline __device__ Float8_ fma(uint4 a, uint4 b, Float8_ fc) {
    Float8_ out;
    out.x = fma(a.x, b.x, fc.x);
    out.y = fma(a.y, b.y, fc.y);
    out.z = fma(a.z, b.z, fc.z);
    out.w = fma(a.w, b.w, fc.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 fma with fp32 accumulation and a broadcast scalar multiplier.
inline __device__ Float8_ fma(uint16_t a, uint4 b, Float8_ fc) {
    uint32_t s = h0_h0(a);
    Float8_ out;
    out.x = fma(s, b.x, fc.x);
    out.y = fma(s, b.y, fc.y);
    out.z = fma(s, b.z, fc.z);
    out.w = fma(s, b.w, fc.w);
    return out;
}
|
| 601 |
+
|
| 602 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 603 |
+
#ifdef ENABLE_BF16
// bf16x2 fma via the project helper bf16hfma2.
inline __device__ __nv_bfloat162 fma(__nv_bfloat162 a, __nv_bfloat162 b, __nv_bfloat162 c) {
    return bf16hfma2(a, b, c);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// bf16x2 fma with a scalar multiplier broadcast into both lanes.
inline __device__ __nv_bfloat162 fma(__nv_bfloat16 a, __nv_bfloat162 b, __nv_bfloat162 c) {
    return bf16hfma2(bf162bf162(a), b, c);
}
////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide bf16 fma: two bf16x2 fmas.
inline __device__ bf16_4_t fma(bf16_4_t a, bf16_4_t b, bf16_4_t c) {
    bf16_4_t out;
    out.x = fma(a.x, b.x, c.x);
    out.y = fma(a.y, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide bf16 fma with a broadcast scalar multiplier.
inline __device__ bf16_4_t fma(__nv_bfloat16 a, bf16_4_t b, bf16_4_t c) {
    __nv_bfloat162 s = bf162bf162(a);
    bf16_4_t out;
    out.x = fma(s, b.x, c.x);
    out.y = fma(s, b.y, c.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide bf16 fma: four bf16x2 fmas.
inline __device__ bf16_8_t fma(bf16_8_t a, bf16_8_t b, bf16_8_t c) {
    bf16_8_t out;
    out.x = fma(a.x, b.x, c.x);
    out.y = fma(a.y, b.y, c.y);
    out.z = fma(a.z, b.z, c.z);
    out.w = fma(a.w, b.w, c.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide bf16 fma with a broadcast scalar multiplier.
inline __device__ bf16_8_t fma(__nv_bfloat16 a, bf16_8_t b, bf16_8_t c) {
    __nv_bfloat162 s = bf162bf162(a);
    bf16_8_t out;
    out.x = fma(s, b.x, c.x);
    out.y = fma(s, b.y, c.y);
    out.z = fma(s, b.z, c.z);
    out.w = fma(s, b.w, c.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// bf16 * bf16 + fp32: widen both operands and accumulate in fp32.
inline __device__ float fma(__nv_bfloat16 a, __nv_bfloat16 b, float fc) {
    return __bfloat162float(a) * __bfloat162float(b) + fc;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// bf16x2 * bf16x2 + float2: widen and accumulate in fp32.
inline __device__ float2 fma(__nv_bfloat162 a, __nv_bfloat162 b, float2 fc) {
    float2 fa = bf1622float2(a);
    float2 fb = bf1622float2(b);
    return fma(fa, fb, fc);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Broadcast-scalar variant of the bf16x2 fma with fp32 accumulation.
inline __device__ float2 fma(__nv_bfloat16 a, __nv_bfloat162 b, float2 fc) {
    return fma(bf162bf162(a), b, fc);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide bf16 fma with fp32 accumulation.
inline __device__ Float4_ fma(bf16_4_t a, bf16_4_t b, Float4_ fc) {
    Float4_ out;
    out.x = fma(a.x, b.x, fc.x);
    out.y = fma(a.y, b.y, fc.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide bf16 fma with fp32 accumulation and a broadcast scalar multiplier.
inline __device__ Float4_ fma(__nv_bfloat16 a, bf16_4_t b, Float4_ fc) {
    __nv_bfloat162 s = bf162bf162(a);
    Float4_ out;
    out.x = fma(s, b.x, fc.x);
    out.y = fma(s, b.y, fc.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide bf16 fma with fp32 accumulation.
inline __device__ Float8_ fma(bf16_8_t a, bf16_8_t b, Float8_ fc) {
    Float8_ out;
    out.x = fma(a.x, b.x, fc.x);
    out.y = fma(a.y, b.y, fc.y);
    out.z = fma(a.z, b.z, fc.z);
    out.w = fma(a.w, b.w, fc.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide bf16 fma with fp32 accumulation and a broadcast scalar multiplier.
inline __device__ Float8_ fma(__nv_bfloat16 a, bf16_8_t b, Float8_ fc) {
    __nv_bfloat162 s = bf162bf162(a);
    Float8_ out;
    out.x = fma(s, b.x, fc.x);
    out.y = fma(s, b.y, fc.y);
    out.z = fma(s, b.z, fc.z);
    out.w = fma(s, b.w, fc.w);
    return out;
}
#endif // ENABLE_BF16
|
| 730 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 731 |
+
|
| 732 |
+
// Generic multiply with an explicit accumulator type Acc.
// The primary template relies on operator* plus implicit conversion to Acc;
// the specializations below provide packed/mixed-precision behavior.
template<typename Acc, typename A, typename B>
inline __device__ Acc mul(A a, B b) {
    return a * b;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fp32 scalar specialization.
template<>
inline __device__ float mul<float, float>(float a, float b) {
    return a * b;
}
|
| 745 |
+
|
| 746 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 747 |
+
|
| 748 |
+
// Elementwise fp32 multiply over float2.
template<>
inline __device__ float2 mul(float2 a, float2 b) {
    float2 out;
    out.x = a.x * b.x;
    out.y = a.y * b.y;
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Scalar-broadcast fp32 multiply over float2.
template<>
inline __device__ float2 mul(float a, float2 b) {
    float2 out;
    out.x = a * b.x;
    out.y = a * b.y;
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Elementwise fp32 multiply over float4.
template<>
inline __device__ float4 mul(float4 a, float4 b) {
    float4 out;
    out.x = a.x * b.x;
    out.y = a.y * b.y;
    out.z = a.z * b.z;
    out.w = a.w * b.w;
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Scalar-broadcast fp32 multiply over float4.
template<>
inline __device__ float4 mul(float a, float4 b) {
    float4 out;
    out.x = a * b.x;
    out.y = a * b.y;
    out.z = a * b.z;
    out.w = a * b.w;
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Scalar-broadcast fp32 multiply over Float8_.
template<>
inline __device__ Float8_ mul(float a, Float8_ b) {
    Float8_ out;
    out.x = make_float2(a * b.x.x, a * b.x.y);
    out.y = make_float2(a * b.y.x, a * b.y.y);
    out.z = make_float2(a * b.z.x, a * b.z.y);
    out.w = make_float2(a * b.w.x, a * b.w.y);
    return out;
}
|
| 806 |
+
|
| 807 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 808 |
+
|
| 809 |
+
// fp16 scalar multiply via PTX mul.f16 (operands are raw fp16 bits).
template<>
inline __device__ uint16_t mul(uint16_t a, uint16_t b) {
    uint16_t out;
    asm volatile("mul.f16 %0, %1, %2;\n" : "=h"(out) : "h"(a), "h"(b));
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Packed half2 multiply via PTX mul.f16x2.
template<>
inline __device__ uint32_t mul(uint32_t a, uint32_t b) {
    uint32_t out;
    asm volatile("mul.f16x2 %0, %1, %2;\n" : "=r"(out) : "r"(a), "r"(b));
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Scalar fp16 broadcast into both lanes, then half2 multiply.
template<>
inline __device__ uint32_t mul(uint16_t a, uint32_t b) {
    return mul<uint32_t, uint32_t, uint32_t>(h0_h0(a), b);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 multiply: two half2 multiplies.
template<>
inline __device__ uint2 mul(uint2 a, uint2 b) {
    uint2 out;
    out.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
    out.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 multiply with a broadcast scalar.
template<>
inline __device__ uint2 mul(uint16_t a, uint2 b) {
    uint32_t s = h0_h0(a);
    uint2 out;
    out.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
    out.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 multiply: four half2 multiplies.
template<>
inline __device__ uint4 mul(uint4 a, uint4 b) {
    uint4 out;
    out.x = mul<uint32_t, uint32_t, uint32_t>(a.x, b.x);
    out.y = mul<uint32_t, uint32_t, uint32_t>(a.y, b.y);
    out.z = mul<uint32_t, uint32_t, uint32_t>(a.z, b.z);
    out.w = mul<uint32_t, uint32_t, uint32_t>(a.w, b.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 multiply with a broadcast scalar.
template<>
inline __device__ uint4 mul(uint16_t a, uint4 b) {
    uint32_t s = h0_h0(a);
    uint4 out;
    out.x = mul<uint32_t, uint32_t, uint32_t>(s, b.x);
    out.y = mul<uint32_t, uint32_t, uint32_t>(s, b.y);
    out.z = mul<uint32_t, uint32_t, uint32_t>(s, b.z);
    out.w = mul<uint32_t, uint32_t, uint32_t>(s, b.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fp16 * fp16 with fp32 result: widen both operands first.
template<>
inline __device__ float mul(uint16_t a, uint16_t b) {
    float fa = half_to_float(a);
    float fb = half_to_float(b);
    return fa * fb;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// fp16 * fp32 with fp32 result.
template<>
inline __device__ float mul(uint16_t a, float b) {
    return half_to_float(a) * b;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// half2 * half2 with float2 result: widen, then fp32 multiply.
template<>
inline __device__ float2 mul(uint32_t a, uint32_t b) {
    float2 fa = half2_to_float2(a);
    float2 fb = half2_to_float2(b);
    return mul<float2, float2, float2>(fa, fb);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Broadcast-scalar variant of the half2 multiply with float2 result.
template<>
inline __device__ float2 mul(uint16_t a, uint32_t b) {
    return mul<float2, uint32_t, uint32_t>(h0_h0(a), b);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 multiply with Float4_ (fp32) result.
template<>
inline __device__ Float4_ mul(uint2 a, uint2 b) {
    Float4_ out;
    out.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
    out.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 4-wide fp16 multiply with fp32 result and a broadcast scalar.
template<>
inline __device__ Float4_ mul(uint16_t a, uint2 b) {
    uint32_t s = h0_h0(a);
    Float4_ out;
    out.x = mul<float2, uint32_t, uint32_t>(s, b.x);
    out.y = mul<float2, uint32_t, uint32_t>(s, b.y);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 multiply with Float8_ (fp32) result.
template<>
inline __device__ Float8_ mul(uint4 a, uint4 b) {
    Float8_ out;
    out.x = mul<float2, uint32_t, uint32_t>(a.x, b.x);
    out.y = mul<float2, uint32_t, uint32_t>(a.y, b.y);
    out.z = mul<float2, uint32_t, uint32_t>(a.z, b.z);
    out.w = mul<float2, uint32_t, uint32_t>(a.w, b.w);
    return out;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// 8-wide fp16 multiply with fp32 result and a broadcast scalar.
template<>
inline __device__ Float8_ mul(uint16_t a, uint4 b) {
    uint32_t s = h0_h0(a);
    Float8_ out;
    out.x = mul<float2, uint32_t, uint32_t>(s, b.x);
    out.y = mul<float2, uint32_t, uint32_t>(s, b.y);
    out.z = mul<float2, uint32_t, uint32_t>(s, b.z);
    out.w = mul<float2, uint32_t, uint32_t>(s, b.w);
    return out;
}
|
| 970 |
+
|
| 971 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 972 |
+
|
| 973 |
+
#ifdef ENABLE_BF16
// mul specializations for bf16 data (__nv_bfloat16 / __nv_bfloat162 /
// bf16_4_t / bf16_8_t), with bf16 or float accumulators.

template<>
inline __device__ __nv_bfloat16 mul(__nv_bfloat16 a, __nv_bfloat16 b)
{
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
    // Native bf16 multiply is available on sm_80+.
    return __hmul(a, b);
#else
    // Older architectures use the emulated helper defined earlier in this header.
    return bf16hmul(a, b);
#endif
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ __nv_bfloat162 mul(__nv_bfloat162 a, __nv_bfloat162 b)
{
    return bf16hmul2(a, b);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ __nv_bfloat162 mul(__nv_bfloat16 a, __nv_bfloat162 b)
{
    // Broadcast the scalar bf16 to both lanes, then do a packed multiply.
    return mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(bf162bf162(a), b);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ bf16_4_t mul(bf16_4_t a, bf16_4_t b)
{
    bf16_4_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    return c;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ bf16_4_t mul(__nv_bfloat16 a, bf16_4_t b)
{
    // Scalar-times-vector: broadcast once, multiply both bf162 lanes.
    __nv_bfloat162 s = bf162bf162(a);
    bf16_4_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    return c;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ bf16_8_t mul(bf16_8_t a, bf16_8_t b)
{
    bf16_8_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    c.z = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.z, b.z);
    c.w = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(a.w, b.w);
    return c;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ bf16_8_t mul(__nv_bfloat16 a, bf16_8_t b)
{
    // Scalar-times-vector: broadcast once, multiply all four bf162 lanes.
    __nv_bfloat162 s = bf162bf162(a);
    bf16_8_t c;
    c.x = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    c.y = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    c.z = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.z);
    c.w = mul<__nv_bfloat162, __nv_bfloat162, __nv_bfloat162>(s, b.w);
    return c;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ float mul(__nv_bfloat16 a, __nv_bfloat16 b)
{
    // Multiply in fp32 to avoid a second rounding step.
    float fa = (float)a;
    float fb = (float)b;
    return fa * fb;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ float mul(__nv_bfloat16 a, float b)
{
    return __bfloat162float(a) * b;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ float2 mul(__nv_bfloat162 a, __nv_bfloat162 b)
{
    // Widen both packed operands to float2 and multiply there.
    float2 fa = bf1622float2(a);
    float2 fb = bf1622float2(b);
    return mul<float2, float2, float2>(fa, fb);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ float2 mul(__nv_bfloat16 a, __nv_bfloat162 b)
{
    return mul<float2, __nv_bfloat162, __nv_bfloat162>(bf162bf162(a), b);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ Float4_ mul(bf16_4_t a, bf16_4_t b)
{
    Float4_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    return fc;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ Float4_ mul(__nv_bfloat16 a, bf16_4_t b)
{
    __nv_bfloat162 s = bf162bf162(a);
    Float4_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    return fc;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ Float8_ mul(bf16_8_t a, bf16_8_t b)
{
    Float8_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.x, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.y, b.y);
    fc.z = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.z, b.z);
    fc.w = mul<float2, __nv_bfloat162, __nv_bfloat162>(a.w, b.w);
    return fc;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<>
inline __device__ Float8_ mul(__nv_bfloat16 a, bf16_8_t b)
{
    __nv_bfloat162 s = bf162bf162(a);
    Float8_ fc;
    fc.x = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.x);
    fc.y = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.y);
    fc.z = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.z);
    fc.w = mul<float2, __nv_bfloat162, __nv_bfloat162>(s, b.w);
    return fc;
}
#endif // ENABLE_BF16
|
| 1136 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1137 |
+
|
| 1138 |
+
// Horizontal sum of all lanes of a value, widened to a single float.

inline __device__ float sum(float v)
{
    return v;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(float2 v)
{
    return v.x + v.y;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(float4 v)
{
    return v.x + v.y + v.z + v.w;
}
|
| 1156 |
+
|
| 1157 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1158 |
+
|
| 1159 |
+
#ifdef ENABLE_BF16
// Horizontal sums for bf16 packed types; the reduction is done in fp32.

inline __device__ float sum(__nv_bfloat162 v)
{
    float2 vf = bf1622float2(v);
    return vf.x + vf.y;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(bf16_4_t v)
{
    return sum(v.x) + sum(v.y);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(bf16_8_t v)
{
    return sum(v.x) + sum(v.y) + sum(v.z) + sum(v.w);
}
#endif // ENABLE_BF16
|
| 1180 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1181 |
+
|
| 1182 |
+
// Horizontal sums for packed fp16 carried in integer registers and for the
// float accumulator structs. For the packed-half variants the halves are
// first pairwise-added in fp16 (add) and only the final half2 is widened.

inline __device__ float sum(uint16_t v)
{
    return half_to_float(v);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(uint32_t v)
{
    float2 tmp = half2_to_float2(v);
    return tmp.x + tmp.y;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(uint2 v)
{
    uint32_t c = add(v.x, v.y);
    return sum(c);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(uint4 v)
{
    // Sequential pairwise reduction in fp16, widened only at the end.
    uint32_t c = add(v.x, v.y);
    c = add(c, v.z);
    c = add(c, v.w);
    return sum(c);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(Float4_ v)
{
    return v.x.x + v.x.y + v.y.x + v.y.y;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

inline __device__ float sum(Float8_ v)
{
    return v.x.x + v.x.y + v.y.x + v.y.y + v.z.x + v.z.y + v.w.x + v.w.y;
}
|
| 1232 |
+
|
| 1233 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1234 |
+
|
| 1235 |
+
// dot(a, b) = horizontal sum of the elementwise product. The first form keeps
// the accumulator type equal to T; the second lets the caller pick a wider
// accumulator type A for the multiply (e.g. float2 for half2 inputs).

template<typename T>
inline __device__ float dot(T a, T b)
{
    return sum(mul<T, T, T>(a, b));
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<typename A, typename T>
inline __device__ float dot(T a, T b)
{
    return sum(mul<A, T, T>(a, b));
}
|
| 1248 |
+
|
| 1249 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1250 |
+
|
| 1251 |
+
// Zero-initialize a destination register of an arbitrary POD vector type.

inline __device__ void zero(uint16_t& dst)
{
    dst = uint16_t(0);
}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<typename T>
inline __device__ void zero(T& dst)
{
    // Clear the value one 32-bit word at a time through a union
    // (assumes sizeof(T) is a multiple of 4 — true for the packed
    // vector types used in this header).
    constexpr int WORDS = sizeof(T) / 4;
    union {
        T raw;
        uint32_t words[WORDS];
    } tmp;
#pragma unroll
    for (int ii = 0; ii < WORDS; ++ii) {
        tmp.words[ii] = 0u;
    }
    dst = tmp.raw;
}
|
| 1272 |
+
|
| 1273 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 1274 |
+
|
| 1275 |
+
// Rotary (RoPE) coefficient for dimension pair `zid` at timestep `t_step`:
// angle = t_step / base^(zid / rot_embed_dim); returns {cos(angle), sin(angle)}.
inline __device__ float2 rotary_embedding_coefficient(const int zid, const int rot_embed_dim, const int t_step, const float base)
{
    const float pos_idx_inv_freq = t_step / pow(base, zid / (float)rot_embed_dim);
    return {cos(pos_idx_inv_freq), sin(pos_idx_inv_freq)};
}

// Apply the 2D rotation {cos, sin} = coef to the pair v = (x, y).
inline __device__ float2 rotary_embedding_transform(const float2 v, const float2 coef)
{
    float2 rot_v;
    rot_v.x = coef.x * v.x - coef.y * v.y;
    rot_v.y = coef.x * v.y + coef.y * v.x;
    return rot_v;
}

// half2 variant: widen to float2, rotate, repack.
inline __device__ uint32_t rotary_embedding_transform(const uint32_t v, const float2 coef)
{
    float2 fv = half2_to_float2(v);
    float2 rot_fv = rotary_embedding_transform(fv, coef);
    return float2_to_half2(rot_fv);
}

#ifdef ENABLE_BF16
// bf162 variant: widen to float2, rotate, repack with round-to-nearest.
inline __device__ __nv_bfloat162 rotary_embedding_transform(const __nv_bfloat162 v, const float2 coef)
{
    float2 fv = bf1622float2(v);
    float2 rot_fv = rotary_embedding_transform(fv, coef);
    return __floats2bfloat162_rn(rot_fv.x, rot_fv.y);
}
#endif
|
| 1304 |
+
|
| 1305 |
+
// apply_rotary_embedding overloads (computed-frequency form). Each overload
// handles one register width; a thread owning `tid` rotates the consecutive
// dimension pairs starting at (elems_per_thread * tid), and does nothing when
// those dimensions fall beyond rot_embed_dim. The q-and-k variants apply the
// same coefficients to both tensors. A single float carries no pair, so the
// scalar overloads are no-ops.

inline __device__ void apply_rotary_embedding(float& q, int zid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    return;
}

inline __device__ void apply_rotary_embedding(float& q, float& k, int zid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    return;
}

inline __device__ void apply_rotary_embedding(float2& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
    q = rotary_embedding_transform(q, coef);
}

inline __device__ void apply_rotary_embedding(float2& q, float2& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
    q = rotary_embedding_transform(q, coef);
    k = rotary_embedding_transform(k, coef);
}

inline __device__ void apply_rotary_embedding(float4& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }

    // View the float4 as two float2 pairs so the pairwise transform applies.
    Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
    const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
    q_.x = rotary_embedding_transform(q_.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
    q_.y = rotary_embedding_transform(q_.y, coef1);
}

inline __device__ void apply_rotary_embedding(float4& q, float4& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }

    Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
    Float4_& k_ = *reinterpret_cast<Float4_*>(&k);
    const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
    q_.x = rotary_embedding_transform(q_.x, coef0);
    k_.x = rotary_embedding_transform(k_.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
    q_.y = rotary_embedding_transform(q_.y, coef1);
    k_.y = rotary_embedding_transform(k_.y, coef1);
}

inline __device__ void apply_rotary_embedding(uint32_t& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
    q = rotary_embedding_transform(q, coef);
}

inline __device__ void apply_rotary_embedding(uint32_t& q, uint32_t& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
    q = rotary_embedding_transform(q, coef);
    k = rotary_embedding_transform(k, coef);
}

inline __device__ void apply_rotary_embedding(uint2& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
}

inline __device__ void apply_rotary_embedding(uint2& q, uint2& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    k.x = rotary_embedding_transform(k.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
    k.y = rotary_embedding_transform(k.y, coef1);
}

inline __device__ void apply_rotary_embedding(uint4& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
    const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
    q.z = rotary_embedding_transform(q.z, coef2);
    const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
    q.w = rotary_embedding_transform(q.w, coef3);
}

inline __device__ void apply_rotary_embedding(uint4& q, uint4& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    k.x = rotary_embedding_transform(k.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
    k.y = rotary_embedding_transform(k.y, coef1);
    const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
    q.z = rotary_embedding_transform(q.z, coef2);
    k.z = rotary_embedding_transform(k.z, coef2);
    const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
    q.w = rotary_embedding_transform(q.w, coef3);
    k.w = rotary_embedding_transform(k.w, coef3);
}
|
| 1439 |
+
|
| 1440 |
+
#ifdef ENABLE_BF16
// bf16 apply_rotary_embedding overloads (computed-frequency form) —
// same per-width structure as the fp16 overloads above.

inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
    q = rotary_embedding_transform(q, coef);
}

inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, __nv_bfloat162& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef = rotary_embedding_coefficient(2 * tid, rot_embed_dim, t_step, base);
    q = rotary_embedding_transform(q, coef);
    k = rotary_embedding_transform(k, coef);
}

inline __device__ void apply_rotary_embedding(bf16_4_t& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
}

inline __device__ void apply_rotary_embedding(bf16_4_t& q, bf16_4_t& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(4 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    k.x = rotary_embedding_transform(k.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
    k.y = rotary_embedding_transform(k.y, coef1);
}

inline __device__ void apply_rotary_embedding(bf16_8_t& q, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
    const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
    q.z = rotary_embedding_transform(q.z, coef2);
    const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
    q.w = rotary_embedding_transform(q.w, coef3);
}

inline __device__ void apply_rotary_embedding(bf16_8_t& q, bf16_8_t& k, int tid, int rot_embed_dim, int t_step, const float base=10000.0f)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto coef0 = rotary_embedding_coefficient(8 * tid, rot_embed_dim, t_step, base);
    q.x = rotary_embedding_transform(q.x, coef0);
    k.x = rotary_embedding_transform(k.x, coef0);
    const auto coef1 = rotary_embedding_coefficient(8 * tid + 2, rot_embed_dim, t_step, base);
    q.y = rotary_embedding_transform(q.y, coef1);
    k.y = rotary_embedding_transform(k.y, coef1);
    const auto coef2 = rotary_embedding_coefficient(8 * tid + 4, rot_embed_dim, t_step, base);
    q.z = rotary_embedding_transform(q.z, coef2);
    k.z = rotary_embedding_transform(k.z, coef2);
    const auto coef3 = rotary_embedding_coefficient(8 * tid + 6, rot_embed_dim, t_step, base);
    q.w = rotary_embedding_transform(q.w, coef3);
    k.w = rotary_embedding_transform(k.w, coef3);
}
#endif // ENABLE_BF16
|
| 1518 |
+
|
| 1519 |
+
// Table-based rotary coefficient: read precomputed cos/sin values instead of
// computing them from a base frequency.
template <typename T>
inline __device__ float2 rotary_embedding_coefficient(const int zid, const int t_step, const T* rotary_cos, const T* rotary_sin)
{
    // zid is the index of the dimension (0, 2, 4, ..., rotary_dim).
    // rotary_cos/sin stores those at index 0, 1, 2, ..., rotary_dim / 2.
    return {float(rotary_cos[zid / 2]), float(rotary_sin[zid / 2])};
}

// fp16 is special because we use uint16_t for reading the data, for backward compatibility:
// reinterpret the raw 16-bit words as __half before converting to float.
template <>
inline __device__ float2 rotary_embedding_coefficient<uint16_t>(const int zid, const int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
{
    // zid is the index of the dimension (0, 2, 4, ..., rotary_dim).
    // rotary_cos/sin stores those at index 0, 1, 2, ..., rotary_dim / 2.
    return {float(reinterpret_cast<const __half*>(rotary_cos)[zid / 2]),
            float(reinterpret_cast<const __half*>(rotary_sin)[zid / 2])};
}
| 1536 |
+
|
| 1537 |
+
inline __device__ void apply_rotary_embedding(float& q, int zid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
|
| 1538 |
+
{
|
| 1539 |
+
return;
|
| 1540 |
+
}
|
| 1541 |
+
|
| 1542 |
+
inline __device__ void apply_rotary_embedding(float& q, float& k, int zid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
|
| 1543 |
+
{
|
| 1544 |
+
return;
|
| 1545 |
+
}
|
| 1546 |
+
|
| 1547 |
+
inline __device__ void apply_rotary_embedding(float2& q, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
|
| 1548 |
+
{
|
| 1549 |
+
if (2 * tid >= rot_embed_dim) {
|
| 1550 |
+
return;
|
| 1551 |
+
}
|
| 1552 |
+
const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
|
| 1553 |
+
q = rotary_embedding_transform(q, coef);
|
| 1554 |
+
}
|
| 1555 |
+
|
| 1556 |
+
inline __device__ void apply_rotary_embedding(float2& q, float2& k, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
|
| 1557 |
+
{
|
| 1558 |
+
if (2 * tid >= rot_embed_dim) {
|
| 1559 |
+
return;
|
| 1560 |
+
}
|
| 1561 |
+
const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
|
| 1562 |
+
q = rotary_embedding_transform(q, coef);
|
| 1563 |
+
k = rotary_embedding_transform(k, coef);
|
| 1564 |
+
}
|
| 1565 |
+
|
| 1566 |
+
inline __device__ void apply_rotary_embedding(float4& q, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
|
| 1567 |
+
{
|
| 1568 |
+
if (4 * tid >= rot_embed_dim) {
|
| 1569 |
+
return;
|
| 1570 |
+
}
|
| 1571 |
+
|
| 1572 |
+
Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
|
| 1573 |
+
const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
|
| 1574 |
+
q_.x = rotary_embedding_transform(q_.x, coef0);
|
| 1575 |
+
const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
|
| 1576 |
+
q_.y = rotary_embedding_transform(q_.y, coef1);
|
| 1577 |
+
}
|
| 1578 |
+
|
| 1579 |
+
inline __device__ void apply_rotary_embedding(float4& q, float4& k, int tid, int rot_embed_dim, int t_step, const float* rotary_cos, const float* rotary_sin)
|
| 1580 |
+
{
|
| 1581 |
+
if (4 * tid >= rot_embed_dim) {
|
| 1582 |
+
return;
|
| 1583 |
+
}
|
| 1584 |
+
|
| 1585 |
+
Float4_& q_ = *reinterpret_cast<Float4_*>(&q);
|
| 1586 |
+
Float4_& k_ = *reinterpret_cast<Float4_*>(&k);
|
| 1587 |
+
const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
|
| 1588 |
+
q_.x = rotary_embedding_transform(q_.x, coef0);
|
| 1589 |
+
k_.x = rotary_embedding_transform(k_.x, coef0);
|
| 1590 |
+
const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
|
| 1591 |
+
q_.y = rotary_embedding_transform(q_.y, coef1);
|
| 1592 |
+
k_.y = rotary_embedding_transform(k_.y, coef1);
|
| 1593 |
+
}
|
| 1594 |
+
|
| 1595 |
+
inline __device__ void apply_rotary_embedding(uint32_t& q, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
|
| 1596 |
+
{
|
| 1597 |
+
if (2 * tid >= rot_embed_dim) {
|
| 1598 |
+
return;
|
| 1599 |
+
}
|
| 1600 |
+
const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
|
| 1601 |
+
q = rotary_embedding_transform(q, coef);
|
| 1602 |
+
}
|
| 1603 |
+
|
| 1604 |
+
inline __device__ void apply_rotary_embedding(uint32_t& q, uint32_t& k, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
|
| 1605 |
+
{
|
| 1606 |
+
if (2 * tid >= rot_embed_dim) {
|
| 1607 |
+
return;
|
| 1608 |
+
}
|
| 1609 |
+
const auto coef = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
|
| 1610 |
+
q = rotary_embedding_transform(q, coef);
|
| 1611 |
+
k = rotary_embedding_transform(k, coef);
|
| 1612 |
+
}
|
| 1613 |
+
|
| 1614 |
+
inline __device__ void apply_rotary_embedding(uint2& q, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
|
| 1615 |
+
{
|
| 1616 |
+
if (4 * tid >= rot_embed_dim) {
|
| 1617 |
+
return;
|
| 1618 |
+
}
|
| 1619 |
+
const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
|
| 1620 |
+
q.x = rotary_embedding_transform(q.x, coef0);
|
| 1621 |
+
const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
|
| 1622 |
+
q.y = rotary_embedding_transform(q.y, coef1);
|
| 1623 |
+
}
|
| 1624 |
+
|
| 1625 |
+
inline __device__ void apply_rotary_embedding(uint2& q, uint2& k, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
|
| 1626 |
+
{
|
| 1627 |
+
if (4 * tid >= rot_embed_dim) {
|
| 1628 |
+
return;
|
| 1629 |
+
}
|
| 1630 |
+
const auto coef0 = rotary_embedding_coefficient(4 * tid, t_step, rotary_cos, rotary_sin);
|
| 1631 |
+
q.x = rotary_embedding_transform(q.x, coef0);
|
| 1632 |
+
k.x = rotary_embedding_transform(k.x, coef0);
|
| 1633 |
+
const auto coef1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
|
| 1634 |
+
q.y = rotary_embedding_transform(q.y, coef1);
|
| 1635 |
+
k.y = rotary_embedding_transform(k.y, coef1);
|
| 1636 |
+
}
|
| 1637 |
+
|
| 1638 |
+
// Rotary embedding for a uint4 (8 fp16 values) query vector: four channel
// pairs per thread starting at channel 8*tid.
inline __device__ void apply_rotary_embedding(uint4& q, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto c0 = rotary_embedding_coefficient(8 * tid,     t_step, rotary_cos, rotary_sin);
    const auto c1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
    const auto c2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
    const auto c3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
    q.x = rotary_embedding_transform(q.x, c0);
    q.y = rotary_embedding_transform(q.y, c1);
    q.z = rotary_embedding_transform(q.z, c2);
    q.w = rotary_embedding_transform(q.w, c3);
}
// Rotary embedding for uint4 q and k together; coefficients are computed once
// per channel pair and applied to both vectors.
inline __device__ void apply_rotary_embedding(uint4& q, uint4& k, int tid, int rot_embed_dim, int t_step, const uint16_t* rotary_cos, const uint16_t* rotary_sin)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto c0 = rotary_embedding_coefficient(8 * tid,     t_step, rotary_cos, rotary_sin);
    const auto c1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
    const auto c2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
    const auto c3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
    q.x = rotary_embedding_transform(q.x, c0);
    q.y = rotary_embedding_transform(q.y, c1);
    q.z = rotary_embedding_transform(q.z, c2);
    q.w = rotary_embedding_transform(q.w, c3);
    k.x = rotary_embedding_transform(k.x, c0);
    k.y = rotary_embedding_transform(k.y, c1);
    k.z = rotary_embedding_transform(k.z, c2);
    k.w = rotary_embedding_transform(k.w, c3);
}
#ifdef ENABLE_BF16
// bf16 variants of the rotary-embedding overloads above. Same channel layout:
// 2/4/8 values per thread for __nv_bfloat162 / bf16_4_t / bf16_8_t.

inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto c = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
    q = rotary_embedding_transform(q, c);
}

inline __device__ void apply_rotary_embedding(__nv_bfloat162& q, __nv_bfloat162& k, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
{
    if (2 * tid >= rot_embed_dim) {
        return;
    }
    const auto c = rotary_embedding_coefficient(2 * tid, t_step, rotary_cos, rotary_sin);
    q = rotary_embedding_transform(q, c);
    k = rotary_embedding_transform(k, c);
}

inline __device__ void apply_rotary_embedding(bf16_4_t& q, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }
    const auto c0 = rotary_embedding_coefficient(4 * tid,     t_step, rotary_cos, rotary_sin);
    const auto c1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
    q.x = rotary_embedding_transform(q.x, c0);
    q.y = rotary_embedding_transform(q.y, c1);
}

inline __device__ void apply_rotary_embedding(bf16_4_t& q, bf16_4_t& k, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
{
    if (4 * tid >= rot_embed_dim) {
        return;
    }
    const auto c0 = rotary_embedding_coefficient(4 * tid,     t_step, rotary_cos, rotary_sin);
    const auto c1 = rotary_embedding_coefficient(4 * tid + 2, t_step, rotary_cos, rotary_sin);
    q.x = rotary_embedding_transform(q.x, c0);
    q.y = rotary_embedding_transform(q.y, c1);
    k.x = rotary_embedding_transform(k.x, c0);
    k.y = rotary_embedding_transform(k.y, c1);
}

inline __device__ void apply_rotary_embedding(bf16_8_t& q, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto c0 = rotary_embedding_coefficient(8 * tid,     t_step, rotary_cos, rotary_sin);
    const auto c1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
    const auto c2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
    const auto c3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
    q.x = rotary_embedding_transform(q.x, c0);
    q.y = rotary_embedding_transform(q.y, c1);
    q.z = rotary_embedding_transform(q.z, c2);
    q.w = rotary_embedding_transform(q.w, c3);
}

inline __device__ void apply_rotary_embedding(bf16_8_t& q, bf16_8_t& k, int tid, int rot_embed_dim, int t_step, const __nv_bfloat16* rotary_cos, const __nv_bfloat16* rotary_sin)
{
    if (8 * tid >= rot_embed_dim) {
        return;
    }
    const auto c0 = rotary_embedding_coefficient(8 * tid,     t_step, rotary_cos, rotary_sin);
    const auto c1 = rotary_embedding_coefficient(8 * tid + 2, t_step, rotary_cos, rotary_sin);
    const auto c2 = rotary_embedding_coefficient(8 * tid + 4, t_step, rotary_cos, rotary_sin);
    const auto c3 = rotary_embedding_coefficient(8 * tid + 6, t_step, rotary_cos, rotary_sin);
    q.x = rotary_embedding_transform(q.x, c0);
    q.y = rotary_embedding_transform(q.y, c1);
    q.z = rotary_embedding_transform(q.z, c2);
    q.w = rotary_embedding_transform(q.w, c3);
    k.x = rotary_embedding_transform(k.x, c0);
    k.y = rotary_embedding_transform(k.y, c1);
    k.z = rotary_embedding_transform(k.z, c2);
    k.w = rotary_embedding_transform(k.w, c3);
}
#endif // ENABLE_BF16
// Gather a transposed vector out of pitched shared memory: consecutive
// elements of `vec` come from consecutive rows (stride `smem_pitch`) at
// column `transpose_idx`. Specialized per (vector, element) type pair below.
template<typename Vec_T, typename T>
__device__ __inline__ void vec_from_smem_transpose(Vec_T& vec, T* smem, int transpose_idx, int smem_pitch);

// Scalar float: a one-element "vector" needs no transpose; intentional no-op.
template<>
__device__ __inline__ void vec_from_smem_transpose(float& vec, float* smem, int transpose_idx, int smem_pitch)
{
    return;
}
// Pack one fp16 value from each of two smem rows into a single uint32_t.
template<>
__device__ __inline__ void vec_from_smem_transpose(uint32_t& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint32_t packed;
        uint16_t h[2];
    } u;
    u.h[0] = smem[transpose_idx];
    u.h[1] = smem[smem_pitch + transpose_idx];

    vec = u.packed;
}
// Load 2 fp16 values from each of two smem rows and interleave them:
// out = {row0[0], row1[0], row0[1], row1[1]}.
template<>
__device__ __inline__ void vec_from_smem_transpose(uint2& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint32_t packed;
        uint16_t h[2];
    } row0, row1;
    row0.packed = *reinterpret_cast<uint32_t*>(&smem[transpose_idx]);
    row1.packed = *reinterpret_cast<uint32_t*>(&smem[smem_pitch + transpose_idx]);

    union {
        uint2    packed;
        uint16_t h[4];
    } out;
#pragma unroll
    for (int i = 0; i < 2; ++i) {
        out.h[2 * i]     = row0.h[i];
        out.h[2 * i + 1] = row1.h[i];
    }

    vec = out.packed;
}
// Load 4 fp16 values from each of two smem rows and interleave them:
// out = {row0[0], row1[0], row0[1], row1[1], ...}.
template<>
__device__ __inline__ void vec_from_smem_transpose(uint4& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint64_t packed;
        uint16_t h[4];
    } row0, row1;
    row0.packed = *reinterpret_cast<uint64_t*>(&smem[transpose_idx]);
    row1.packed = *reinterpret_cast<uint64_t*>(&smem[smem_pitch + transpose_idx]);

    union {
        uint4    packed;
        uint16_t h[8];
    } out;
#pragma unroll
    for (int i = 0; i < 4; ++i) {
        out.h[2 * i]     = row0.h[i];
        out.h[2 * i + 1] = row1.h[i];
    }

    vec = out.packed;
}
#ifdef ENABLE_BF16
// bf16 variants: each output __nv_bfloat162 pairs one element from each of
// the two smem rows being transposed.
template<>
__device__ __inline__ void
vec_from_smem_transpose(bf16_4_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint32_t      packed;
        __nv_bfloat16 b[2];
    } row0, row1;
    row0.packed = *reinterpret_cast<uint32_t*>(&smem[transpose_idx]);
    row1.packed = *reinterpret_cast<uint32_t*>(&smem[smem_pitch + transpose_idx]);

    vec.x = __nv_bfloat162{row0.b[0], row1.b[0]};
    vec.y = __nv_bfloat162{row0.b[1], row1.b[1]};
}

template<>
__device__ __inline__ void
vec_from_smem_transpose(bf16_8_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint64_t      packed;
        __nv_bfloat16 b[4];
    } row0, row1;
    row0.packed = *reinterpret_cast<uint64_t*>(&smem[transpose_idx]);
    row1.packed = *reinterpret_cast<uint64_t*>(&smem[smem_pitch + transpose_idx]);

    vec.x = __nv_bfloat162{row0.b[0], row1.b[0]};
    vec.y = __nv_bfloat162{row0.b[1], row1.b[1]};
    vec.z = __nv_bfloat162{row0.b[2], row1.b[2]};
    vec.w = __nv_bfloat162{row0.b[3], row1.b[3]};
}
#endif // ENABLE_BF16
// float4 layout note: x/z take the two columns of row 0, y/w the same two
// columns of row 1 (matching the fp16 interleaved packing above).
template<>
__device__ __inline__ void vec_from_smem_transpose(float4& vec, float* smem, int transpose_idx, int smem_pitch)
{
    vec.x = smem[transpose_idx];
    vec.z = smem[transpose_idx + 1];
    vec.y = smem[smem_pitch + transpose_idx];
    vec.w = smem[smem_pitch + transpose_idx + 1];
}
// Same as the uint16_t specialization, but smem is typed as `half`.
template<>
__device__ __inline__ void vec_from_smem_transpose(uint32_t& vec, half* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint32_t packed;
        half     h[2];
    } u;
    u.h[0] = smem[transpose_idx];
    u.h[1] = smem[smem_pitch + transpose_idx];

    vec = u.packed;
}
#ifdef ENABLE_BF16
// One bf16 element from each row packed into a __nv_bfloat162.
template<>
__device__ __inline__ void
vec_from_smem_transpose(__nv_bfloat162& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
{
    vec.x = smem[transpose_idx];
    vec.y = smem[smem_pitch + transpose_idx];
}
#endif
// One float from each row packed into a float2.
template<>
__device__ __inline__ void vec_from_smem_transpose(float2& vec, float* smem, int transpose_idx, int smem_pitch)
{
    vec.x = smem[transpose_idx];
    vec.y = smem[smem_pitch + transpose_idx];
}
// Inverse of vec_from_smem_transpose: scatter a register vector back into two
// rows of pitched shared memory. Specialized per (vector, element) type pair.
template<typename Vec_T, typename T>
__device__ __inline__ void write_smem_transpose(const Vec_T& vec, T* smem, int transpose_idx, int smem_pitch);

// Scalar float: nothing to transpose; intentional no-op.
template<>
__device__ __inline__ void write_smem_transpose(const float& vec, float* smem, int transpose_idx, int smem_pitch)
{
    return;
}
// De-interleave 8 fp16 values: even lanes go to row 0, odd lanes to row 1,
// each written back as a single 64-bit store.
template<>
__device__ __inline__ void write_smem_transpose(const uint4& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint4    packed;
        uint16_t h[8];
    } in;
    in.packed = vec;

    union {
        uint64_t packed;
        uint16_t h[4];
    } row0, row1;
#pragma unroll
    for (int i = 0; i < 4; ++i) {
        row0.h[i] = in.h[2 * i];
        row1.h[i] = in.h[2 * i + 1];
    }

    *reinterpret_cast<uint64_t*>(&smem[transpose_idx])              = row0.packed;
    *reinterpret_cast<uint64_t*>(&smem[smem_pitch + transpose_idx]) = row1.packed;
}
// De-interleave 4 fp16 values: even lanes to row 0, odd lanes to row 1,
// written back as 32-bit stores.
template<>
__device__ __inline__ void write_smem_transpose(const uint2& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint2    packed;
        uint16_t h[4];
    } in;
    in.packed = vec;

    union {
        uint32_t packed;
        uint16_t h[2];
    } row0, row1;
#pragma unroll
    for (int i = 0; i < 2; ++i) {
        row0.h[i] = in.h[2 * i];
        row1.h[i] = in.h[2 * i + 1];
    }

    *reinterpret_cast<uint32_t*>(&smem[transpose_idx])              = row0.packed;
    *reinterpret_cast<uint32_t*>(&smem[smem_pitch + transpose_idx]) = row1.packed;
}
// Split a packed fp16x2 register across the two smem rows.
template<>
__device__ __inline__ void write_smem_transpose(const uint32_t& vec, uint16_t* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint32_t packed;
        uint16_t h[2];
    } u;
    u.packed = vec;

    smem[transpose_idx]              = u.h[0];
    smem[smem_pitch + transpose_idx] = u.h[1];
}
// float4 inverse of the transposed load: x/z back to row 0, y/w to row 1.
template<>
__device__ __inline__ void write_smem_transpose(const float4& vec, float* smem, int transpose_idx, int smem_pitch)
{
    smem[transpose_idx]                  = vec.x;
    smem[transpose_idx + 1]              = vec.z;
    smem[smem_pitch + transpose_idx]     = vec.y;
    smem[smem_pitch + transpose_idx + 1] = vec.w;
}
// Same as the uint16_t specialization, but smem is typed as `half`.
template<>
__device__ __inline__ void write_smem_transpose(const uint32_t& vec, half* smem, int transpose_idx, int smem_pitch)
{
    union {
        uint32_t packed;
        half     h[2];
    } u;
    u.packed = vec;

    smem[transpose_idx]              = u.h[0];
    smem[smem_pitch + transpose_idx] = u.h[1];
}
#ifdef ENABLE_BF16
template<>
__device__ __inline__ void
write_smem_transpose(const __nv_bfloat162& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
{
    smem[transpose_idx]              = vec.x;
    smem[smem_pitch + transpose_idx] = vec.y;
}

// bf16_4_t / bf16_8_t share the bit layout of uint2 / uint4, so reuse the
// 16-bit integer paths via reinterpretation.
template<>
__device__ __inline__ void
write_smem_transpose(const bf16_4_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
{
    write_smem_transpose(reinterpret_cast<const uint2&>(vec), reinterpret_cast<uint16_t*>(smem), transpose_idx, smem_pitch);
}

template<>
__device__ __inline__ void
write_smem_transpose(const bf16_8_t& vec, __nv_bfloat16* smem, int transpose_idx, int smem_pitch)
{
    write_smem_transpose(reinterpret_cast<const uint4&>(vec), reinterpret_cast<uint16_t*>(smem), transpose_idx, smem_pitch);
}
#endif
// One float to each of the two smem rows.
template<>
__device__ __inline__ void write_smem_transpose(const float2& vec, float* smem, int transpose_idx, int smem_pitch)
{
    smem[transpose_idx]              = vec.x;
    smem[smem_pitch + transpose_idx] = vec.y;
}
} // namespace mmha
|
Code/Baselines/flash-attention/csrc/ft_attention/ft_attention.cpp
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <torch/extension.h>
|
| 2 |
+
#include "ATen/cuda/CUDAContext.h"
|
| 3 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
#include "decoder_masked_multihead_attention.h"
|
| 7 |
+
|
| 8 |
+
#define CHECK_DEVICE(x) TORCH_CHECK(x.device().type() == torch::kCUDA, #x " must be on CUDA")
|
| 9 |
+
#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
|
| 10 |
+
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
|
| 11 |
+
|
| 12 |
+
#define DISPATCH_FLOAT_AND_HALF_AND_BF16(TYPE, NAME, ...) \
|
| 13 |
+
if (TYPE == at::ScalarType::Half) { \
|
| 14 |
+
using scalar_t = at::Half; \
|
| 15 |
+
__VA_ARGS__(); \
|
| 16 |
+
} else if (TYPE == at::ScalarType::BFloat16) { \
|
| 17 |
+
using scalar_t = at::BFloat16; \
|
| 18 |
+
__VA_ARGS__(); \
|
| 19 |
+
} else if (TYPE == at::ScalarType::Float) { \
|
| 20 |
+
using scalar_t = float; \
|
| 21 |
+
__VA_ARGS__(); \
|
| 22 |
+
} else { \
|
| 23 |
+
AT_ERROR(#NAME, " not implemented for type '", toString(TYPE), "'"); \
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
template<typename T>
|
| 27 |
+
void masked_multihead_attention(const Masked_multihead_attention_params<T>& params,
|
| 28 |
+
const cudaStream_t& stream);
|
| 29 |
+
|
| 30 |
+
template<typename T>
|
| 31 |
+
void cross_multihead_attention(const Masked_multihead_attention_params<T>& params,
|
| 32 |
+
const cudaStream_t& stream);
|
| 33 |
+
|
| 34 |
+
template<typename T>
|
| 35 |
+
struct SATypeConverter {
|
| 36 |
+
using Type = T;
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
template<>
|
| 40 |
+
struct SATypeConverter<at::Half> {
|
| 41 |
+
using Type = uint16_t;
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
template<>
|
| 45 |
+
struct SATypeConverter<at::BFloat16> {
|
| 46 |
+
using Type = __nv_bfloat16;
|
| 47 |
+
};
|
| 48 |
+
|
| 49 |
+
template <typename T>
|
| 50 |
+
void set_params(Masked_multihead_attention_params<T> ¶ms,
|
| 51 |
+
const size_t batch_size,
|
| 52 |
+
const size_t nheads,
|
| 53 |
+
const size_t nheads_kv,
|
| 54 |
+
const size_t memory_max_seqlen,
|
| 55 |
+
const size_t headdim,
|
| 56 |
+
const int timestep,
|
| 57 |
+
const int rotary_embedding_dim,
|
| 58 |
+
const float rotary_base,
|
| 59 |
+
const bool neox_rotary_style,
|
| 60 |
+
const int q_batch_stride,
|
| 61 |
+
const int k_batch_stride,
|
| 62 |
+
const int v_batch_stride,
|
| 63 |
+
const int nnz_heads,
|
| 64 |
+
T *q_ptr,
|
| 65 |
+
T *k_ptr,
|
| 66 |
+
T *v_ptr,
|
| 67 |
+
T *k_cache_ptr,
|
| 68 |
+
T *v_cache_ptr,
|
| 69 |
+
int *length_per_sample,
|
| 70 |
+
T *rotary_cos,
|
| 71 |
+
T *rotary_sin,
|
| 72 |
+
T *out_ptr,
|
| 73 |
+
int *nnz_head_idx) {
|
| 74 |
+
// Reset the parameters
|
| 75 |
+
memset(¶ms, 0, sizeof(params));
|
| 76 |
+
params.q = q_ptr;
|
| 77 |
+
params.k = k_ptr;
|
| 78 |
+
params.v = v_ptr;
|
| 79 |
+
params.q_bias = nullptr;
|
| 80 |
+
params.k_bias = nullptr;
|
| 81 |
+
params.v_bias = nullptr;
|
| 82 |
+
params.k_cache = k_cache_ptr;
|
| 83 |
+
params.v_cache = v_cache_ptr;
|
| 84 |
+
params.out = out_ptr;
|
| 85 |
+
params.cache_indir = nullptr;
|
| 86 |
+
params.stride_q = q_batch_stride;
|
| 87 |
+
params.stride_k = k_batch_stride;
|
| 88 |
+
params.stride_v = v_batch_stride;
|
| 89 |
+
params.batch_size = batch_size;
|
| 90 |
+
params.beam_width = 1;
|
| 91 |
+
params.memory_max_len = memory_max_seqlen;
|
| 92 |
+
params.num_heads = nheads;
|
| 93 |
+
params.num_heads_kv = nheads_kv;
|
| 94 |
+
params.num_heads_q_kv_ratio = nheads / nheads_kv;
|
| 95 |
+
params.nnz_heads = nnz_heads;
|
| 96 |
+
params.hidden_size_per_head = headdim;
|
| 97 |
+
params.rotary_embedding_dim = rotary_embedding_dim;
|
| 98 |
+
params.rotary_base = rotary_base;
|
| 99 |
+
params.neox_rotary_style = neox_rotary_style;
|
| 100 |
+
params.timestep = timestep;
|
| 101 |
+
params.inv_sqrt_dh = 1.f / sqrt(float(headdim));
|
| 102 |
+
params.total_padding_tokens = nullptr;
|
| 103 |
+
params.masked_tokens = nullptr;
|
| 104 |
+
params.prefix_prompt_lengths = nullptr;
|
| 105 |
+
params.max_prefix_prompt_length = 0;
|
| 106 |
+
params.relative_attention_bias = nullptr;
|
| 107 |
+
params.relative_attention_bias_stride = 0;
|
| 108 |
+
params.cross_attention_out = nullptr;
|
| 109 |
+
params.max_decoder_seq_len = 0;
|
| 110 |
+
params.is_return_cross_attentions = false;
|
| 111 |
+
params.finished = nullptr;
|
| 112 |
+
params.memory_length_per_sample = nullptr;
|
| 113 |
+
params.length_per_sample = length_per_sample;
|
| 114 |
+
params.rotary_cos = rotary_cos;
|
| 115 |
+
params.rotary_sin = rotary_sin;
|
| 116 |
+
params.nnz_head_idx = nnz_head_idx;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
// Decode-step attention: attend one new query token per batch element against
// the KV cache (plus the new k/v, which the kernel appends to the cache).
//   q, k, v:  (batch, heads[_kv], headdim)
//   v_cache:  (batch, heads_kv, memory_max_seqlen, headdim)
//   k_cache:  (batch, heads_kv, headdim/x, memory_max_seqlen, x), x = 8 for
//             16-bit types and 4 for fp32 (interleaved layout).
// Returns a tensor shaped like q. Optional args: per-sample current lengths,
// per-sample rotary cos/sin tables, and a subset of head indices to compute.
torch::Tensor single_query_attention(const torch::Tensor q,
                                     const torch::Tensor k,
                                     const torch::Tensor v,
                                     torch::Tensor k_cache,
                                     torch::Tensor v_cache,
                                     std::optional<const torch::Tensor> length_per_sample_,
                                     std::optional<const torch::Tensor> rotary_cos_,
                                     std::optional<const torch::Tensor> rotary_sin_,
                                     std::optional<const torch::Tensor> nnz_head_idx_,
                                     const int timestep,
                                     int rotary_embedding_dim = 0,
                                     const float rotary_base = 10000.0f,
                                     const bool neox_rotary_style=true) {
    CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v); CHECK_DEVICE(k_cache); CHECK_DEVICE(v_cache);

    // Problem sizes are derived from the value cache, which has the plain layout.
    int batch_size = v_cache.size(0);
    int nheads = q.size(1);
    int nheads_kv = v_cache.size(1);
    int memory_max_seqlen = v_cache.size(2);
    int headdim = v_cache.size(3);

    auto input_type = q.scalar_type();
    TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);

    CHECK_SHAPE(q, batch_size, nheads, headdim);
    CHECK_SHAPE(k, batch_size, nheads_kv, headdim);
    CHECK_SHAPE(v, batch_size, nheads_kv, headdim);
    CHECK_SHAPE(v_cache, batch_size, nheads_kv, memory_max_seqlen, headdim);
    // k_cache shape: [B, H, Dh/x, L, x] where x=8 for fp16 and x=4 for fp32
    int packsize = k_cache.dtype() == torch::kFloat32 ? 4 : 8;
    CHECK_SHAPE(k_cache, batch_size, nheads_kv, headdim / packsize, memory_max_seqlen, packsize);

    // q/k/v may have a non-trivial batch stride, but heads/headdim must be dense.
    TORCH_CHECK(q.stride(2) == 1 && q.stride(1) == headdim);
    TORCH_CHECK(k.stride(2) == 1 && k.stride(1) == headdim);
    TORCH_CHECK(v.stride(2) == 1 && v.stride(1) == headdim);
    CHECK_CONTIGUOUS(v_cache); CHECK_CONTIGUOUS(k_cache);

    // All tensors must share one dtype.
    TORCH_CHECK(q.scalar_type() == input_type);
    TORCH_CHECK(k.scalar_type() == input_type);
    TORCH_CHECK(v.scalar_type() == input_type);
    TORCH_CHECK(k_cache.scalar_type() == input_type);
    TORCH_CHECK(v_cache.scalar_type() == input_type);

    if (length_per_sample_.has_value()) {
        auto length_per_sample = length_per_sample_.value();
        CHECK_DEVICE(length_per_sample);
        CHECK_SHAPE(length_per_sample, batch_size);
        CHECK_CONTIGUOUS(length_per_sample);
        TORCH_CHECK(length_per_sample.dtype() == torch::kInt32);
    }

    if (rotary_cos_.has_value()) {
        auto rotary_cos = rotary_cos_.value();
        CHECK_DEVICE(rotary_cos);
        // When tables are supplied, the rotary dim is inferred from them and
        // overrides the rotary_embedding_dim argument.
        rotary_embedding_dim = rotary_cos.size(-1) * 2;
        CHECK_SHAPE(rotary_cos, batch_size, rotary_embedding_dim / 2);
        CHECK_CONTIGUOUS(rotary_cos);
        TORCH_CHECK(rotary_cos.scalar_type() == input_type);

        // cos and sin tables come as a pair.
        TORCH_CHECK(rotary_sin_.has_value());
        auto rotary_sin = rotary_sin_.value();
        CHECK_DEVICE(rotary_sin);
        CHECK_SHAPE(rotary_sin, batch_size, rotary_embedding_dim / 2);
        CHECK_CONTIGUOUS(rotary_sin);
        TORCH_CHECK(rotary_sin.scalar_type() == input_type);
    }

    if (nnz_head_idx_.has_value()) {
        auto nnz_head_idx = nnz_head_idx_.value();
        CHECK_DEVICE(nnz_head_idx);
        int nnz_heads = nnz_head_idx.size(0);
        CHECK_SHAPE(nnz_head_idx, nnz_heads);
        CHECK_CONTIGUOUS(nnz_head_idx);
        TORCH_CHECK(nnz_head_idx.dtype() == torch::kInt32);
    }

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{q.device()};

    torch::Tensor out = torch::empty_like(q);

    DISPATCH_FLOAT_AND_HALF_AND_BF16(q.scalar_type(), "single_query_attention", [&] {
        using DataType = typename SATypeConverter<scalar_t>::Type;
        Masked_multihead_attention_params<DataType> params;
        set_params(params, batch_size, nheads, nheads_kv, memory_max_seqlen, headdim, timestep,
                   rotary_embedding_dim, rotary_base, neox_rotary_style,
                   q.stride(0), k.stride(0), v.stride(0),
                   nnz_head_idx_.has_value() ? nnz_head_idx_.value().size(0) : 0,
                   reinterpret_cast<DataType*>(q.data_ptr()),
                   reinterpret_cast<DataType*>(k.data_ptr()),
                   reinterpret_cast<DataType*>(v.data_ptr()),
                   reinterpret_cast<DataType*>(k_cache.data_ptr()),
                   reinterpret_cast<DataType*>(v_cache.data_ptr()),
                   length_per_sample_.has_value()
                       ? length_per_sample_.value().data_ptr<int>() : nullptr,
                   rotary_cos_.has_value()
                       ? reinterpret_cast<DataType*>(rotary_cos_.value().data_ptr()) : nullptr,
                   rotary_sin_.has_value()
                       ? reinterpret_cast<DataType*>(rotary_sin_.value().data_ptr()) : nullptr,
                   reinterpret_cast<DataType*>(out.data_ptr()),
                   nnz_head_idx_.has_value() ? nnz_head_idx_.value().data_ptr<int>() : nullptr
        );
        auto stream = at::cuda::getCurrentCUDAStream();
        masked_multihead_attention(params, stream);
    });
    return out;
}
// Python bindings: a single entry point for decode-time attention.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("single_query_attention", &single_query_attention, "Attention with a single query",
          py::arg("q"), py::arg("k"), py::arg("v"), py::arg("k_cache"), py::arg("v_cache"),
          py::arg("length_per_sample_"), py::arg("rotary_cos_"),
          py::arg("rotary_sin_"), py::arg("nnz_head_idx_"),
          py::arg("timestep"), py::arg("rotary_embedding_dim")=0,
          py::arg("rotary_base")=10000.0f, py::arg("neox_rotary_style")=true);
}
|
Code/Baselines/flash-attention/csrc/ft_attention/setup.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
|
| 2 |
+
import sys
|
| 3 |
+
import warnings
|
| 4 |
+
import os
|
| 5 |
+
from packaging.version import parse, Version
|
| 6 |
+
|
| 7 |
+
from setuptools import setup, find_packages
|
| 8 |
+
import subprocess
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# ninja build does not work unless include_dirs are abs path
|
| 15 |
+
this_dir = os.path.dirname(os.path.abspath(__file__))
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_cuda_bare_metal_version(cuda_dir):
    """Run `nvcc -V` from the given toolkit dir and return
    (raw output string, parsed release Version)."""
    raw_output = subprocess.check_output(
        [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
    )
    tokens = raw_output.split()
    # The version follows the literal token "release", e.g. "release 11.8,".
    release = tokens[tokens.index("release") + 1]
    bare_metal_version = parse(release.split(",")[0])
    return raw_output, bare_metal_version
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
    """Fail fast when the local nvcc version differs from the CUDA version
    PyTorch's binaries were built against."""
    raw_output, bare_metal_version = get_cuda_bare_metal_version(cuda_dir)
    torch_binary_version = parse(torch.version.cuda)

    print("\nCompiling cuda extensions with")
    print(raw_output + "from " + cuda_dir + "/bin\n")

    if bare_metal_version != torch_binary_version:
        raise RuntimeError(
            "Cuda extensions are being compiled with a version of Cuda that does "
            "not match the version used to compile Pytorch binaries.  "
            "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
            + "In some cases, a minor-version mismatch will not cause later errors:  "
            "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798.  "
            "You can try commenting out this check (at your own risk)."
        )
def raise_if_cuda_home_none(global_option: str) -> None:
    """Fail fast when *global_option* requires nvcc but no CUDA toolkit was found."""
    if CUDA_HOME is None:
        raise RuntimeError(
            f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
            "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
            "only images whose names contain 'devel' will provide nvcc."
        )
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def append_nvcc_threads(nvcc_extra_args):
    """Return *nvcc_extra_args* plus a ``--threads`` flag when nvcc supports it (>= 11.2)."""
    _, nvcc_version = get_cuda_bare_metal_version(CUDA_HOME)
    if nvcc_version < Version("11.2"):
        return nvcc_extra_args
    # NVCC_THREADS lets the user override the default of 4 parallel compile threads.
    thread_count = os.getenv("NVCC_THREADS") or "4"
    return [*nvcc_extra_args, "--threads", thread_count]
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Cross-compilation fallback: without a visible GPU we cannot query the device
# capability, so pick TORCH_CUDA_ARCH_LIST from the installed CUDA toolkit.
if not torch.cuda.is_available():
    # https://github.com/NVIDIA/apex/issues/486
    # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
    # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
    print(
        "\nWarning: Torch did not find available GPUs on this system.\n",
        "If your intention is to cross-compile, this is not an error.\n"
        "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
        "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
        "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
        "If you wish to cross-compile for a single specific architecture,\n"
        'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
    )
    # Only set a default arch list if the user has not chosen one explicitly.
    if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None and CUDA_HOME is not None:
        _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
        # Newer toolkits know about newer architectures (8.6 needs 11.1, 9.0 needs 11.8).
        if bare_metal_version >= Version("11.8"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6;9.0"
        elif bare_metal_version >= Version("11.1"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
        elif bare_metal_version == Version("11.0"):
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
        else:
            os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"


print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
# Parsed but currently unused below; kept for parity with the apex setup script.
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])

cmdclass = {}
ext_modules = []

# Check, if ATen/CUDAGeneratorImpl.h is found, otherwise use ATen/cuda/CUDAGeneratorImpl.h
# See https://github.com/pytorch/pytorch/pull/70650
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, "include", "ATen", "CUDAGeneratorImpl.h")):
    generator_flag = ["-DOLD_GENERATOR_PATH"]

raise_if_cuda_home_none("--ft_attention")
# Check, if CUDA11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.0"):
    raise RuntimeError("ft_attention is only supported on CUDA 11 and above")
# Always build for Volta (sm_70) and Ampere (sm_80); add Hopper (sm_90) when
# the toolkit is new enough to target it.
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
if bare_metal_version >= Version("11.8"):
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_90,code=sm_90")

ext_modules.append(
    CUDAExtension(
        name="ft_attention",
        sources=[
            "ft_attention.cpp",
            "decoder_masked_multihead_attention.cu",
        ],
        extra_compile_args={
            "cxx": ["-O3", "-DENABLE_BF16"] + generator_flag,
            "nvcc": append_nvcc_threads(
                [
                    "-DENABLE_BF16",  # TODO
                    "-O3",
                    # Re-enable half/bfloat16 operators that PyTorch's build
                    # disables by default via these NO_* macros.
                    "-U__CUDA_NO_HALF_OPERATORS__",
                    "-U__CUDA_NO_HALF_CONVERSIONS__",
                    "-U__CUDA_NO_BFLOAT16_OPERATORS__",
                    "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                    "-U__CUDA_NO_BFLOAT162_OPERATORS__",
                    "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
                    "--expt-relaxed-constexpr",
                    "--expt-extended-lambda",
                    "--use_fast_math",
                ]
                + generator_flag
                + cc_flag
            ),
        },
        include_dirs=[this_dir],
    )
)

setup(
    name="ft_attention",
    version="0.1",
    description="Attention for single query from FasterTransformer",
    ext_modules=ext_modules,
    cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
|
Code/Baselines/flash-attention/csrc/fused_dense_lib/README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
This CUDA extension implements fused matmul + bias (forward and backward), and fused matmul + bias + gelu
|
| 2 |
+
(forward and backward), adapted from Apex's
|
| 3 |
+
[FusedDense](https://github.com/NVIDIA/apex/tree/master/apex/fused_dense).
|
| 4 |
+
We make it work for bfloat16.
|
| 5 |
+
|
| 6 |
+
For best performance, you should use CUDA >= 11.8. CuBLAS versions before
this don't have the best matmul + bias + gelu performance for bfloat16.
|
| 8 |
+
|
| 9 |
+
It has only been tested on A100s.
|
| 10 |
+
|
| 11 |
+
```sh
|
| 12 |
+
cd csrc/fused_dense_lib && pip install .
|
| 13 |
+
```
|
Code/Baselines/flash-attention/csrc/fused_dense_lib/fused_dense.cpp
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Adapted from https://github.com/NVIDIA/apex/blob/master/csrc/fused_dense.cpp
|
| 2 |
+
// We make it work for bfloat16
|
| 3 |
+
#include <torch/extension.h>
|
| 4 |
+
#include <torch/torch.h>
|
| 5 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 6 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
#include <stdio.h>
|
| 10 |
+
|
| 11 |
+
#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
|
| 12 |
+
|
| 13 |
+
// https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h
|
| 14 |
+
// #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
|
| 15 |
+
// Dispatch on a runtime at::ScalarType (Half or BFloat16 only): binds the
// matching C++ type to `scalar_t` and then invokes the lambda in __VA_ARGS__.
// Any other dtype raises. Modeled on apex's type_shim.h dispatch macros.
#define DISPATCH_HALF_AND_BF16(TYPE, NAME, ...) \
    switch (TYPE) { \
    case at::ScalarType::Half: { \
        using scalar_t = at::Half; \
        __VA_ARGS__(); \
        break; \
    } \
    case at::ScalarType::BFloat16: { \
        using scalar_t = at::BFloat16; \
        __VA_ARGS__(); \
        break; \
    } \
    default: \
        AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
    }
|
| 30 |
+
|
| 31 |
+
template <typename T>
|
| 32 |
+
int linear_bias_wgrad_cuda(const T *input, const T *d_output, int64_t in_features, int64_t batch_size, int64_t out_features, T *d_weight, T *d_bias, void *lt_workspace, size_t workspaceSize);
|
| 33 |
+
|
| 34 |
+
template <typename T>
|
| 35 |
+
int linear_act_forward_cuda(const T *input, const T *weight, const T *bias, int64_t in_features, int64_t batch_size, int64_t out_features, bool is_gelu, int heuristic, T *output, void *pre_act, void *lt_workspace, size_t workspaceSize);
|
| 36 |
+
|
| 37 |
+
template <typename T>
|
| 38 |
+
int bias_act_linear_dgrad_bgrad_cuda(const T *weight, const T *d_output, const void *pre_act, int64_t in_features, int64_t batch_size, int64_t out_features, bool is_gelu, int heuristic, T *d_input, T *d_bias, void *lt_workspace, size_t workspaceSize);
|
| 39 |
+
|
| 40 |
+
// Host-side wrapper for the fused weight-gradient kernel
// (linear_bias_wgrad_cuda, defined in fused_dense_cuda.cu).
// input:    (batch_size, in_features)  fp16/bf16, contiguous, on CUDA.
// d_output: (batch_size, out_features) gradient, same dtype as input.
// has_d_bias: also compute the bias gradient.
// Returns {d_weight, d_bias}. NOTE(review): when has_d_bias is false, d_bias
// is returned as a default-constructed (undefined) tensor — callers are
// expected to ignore it in that case.
std::vector<at::Tensor> linear_bias_wgrad(at::Tensor input, at::Tensor d_output, bool has_d_bias) {

    int64_t batch_size = input.size(0);
    int64_t in_features = input.size(1);
    int64_t out_features = d_output.size(1);

    TORCH_CHECK(input.dtype() == torch::kFloat16 || input.dtype() == torch::kBFloat16);
    TORCH_CHECK(input.dtype() == d_output.dtype());
    TORCH_CHECK(input.is_cuda());
    TORCH_CHECK(d_output.is_cuda());
    TORCH_CHECK(input.is_contiguous());
    TORCH_CHECK(d_output.is_contiguous());
    CHECK_SHAPE(input, batch_size, in_features);
    CHECK_SHAPE(d_output, batch_size, out_features);

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{input.device()};

    // create output/workspace tensor
    auto opts = input.options();
    auto d_weight = at::empty({out_features, in_features}, opts);
    at::Tensor d_bias;
    if (has_d_bias) {
// Older cuBLAS lacks the bias-gradient epilogue, so reduce in PyTorch instead.
#if defined(CUBLAS_VERSION) && CUBLAS_VERSION < 11600
        d_bias = d_output.view({-1, out_features}).sum(0, false);
#else
        d_bias = at::empty({out_features}, opts);
#endif
    }
    // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind setting this to 1M.
    // However, Apex sets it to 4M and TransformerEngine sets to 32M for Hopper and 4M for other GPUs
    // https://github.com/NVIDIA/TransformerEngine/blob/a0f0065498bbcfc1da78cf9e8b166f5381613fbc/transformer_engine/pytorch/module.py#L91
    size_t workspaceSize = 1024 * 1024 * (at::cuda::getCurrentDeviceProperties()->major >= 9 ? 32 : 4);
    auto lt_workspace = at::empty({static_cast<int64_t>(workspaceSize)}, opts.dtype(torch::kUInt8));

    DISPATCH_HALF_AND_BF16(input.scalar_type(), "linear_bias_wgrad", [&] {
        auto result = linear_bias_wgrad_cuda<scalar_t>(
            input.data_ptr<scalar_t>(),
            d_output.data_ptr<scalar_t>(),
            in_features,
            batch_size,
            out_features,
            d_weight.data_ptr<scalar_t>(),
            has_d_bias ? d_bias.data_ptr<scalar_t>() : nullptr,
            (void*) (lt_workspace.data_ptr()),
            workspaceSize);
        // Kernel reports failure via a non-zero status code.
        TORCH_CHECK(result == 0, "linear_bias_wgrad failed.");
    });

    return {d_weight, d_bias};
}
|
| 91 |
+
|
| 92 |
+
// Host-side wrapper for the fused matmul + bias + activation forward kernel
// (linear_act_forward_cuda, defined in fused_dense_cuda.cu).
// input:  (batch_size, in_features)  fp16/bf16, contiguous, on CUDA.
// weight: (out_features, in_features), same dtype as input.
// bias_:  optional (out_features) vector, same dtype.
// is_gelu:      GELU when true, otherwise ReLU (see bit-mask comment below).
// save_pre_act: also return the pre-activation buffer needed for backward.
// heuristic:    forwarded to the kernel; presumably selects a cuBLASLt
//               algorithm heuristic — confirm against fused_dense_cuda.cu.
// Returns {output} or {output, pre_act} when save_pre_act is set.
std::vector<at::Tensor> linear_act_forward(at::Tensor input, at::Tensor weight,
                                           std::optional<at::Tensor> bias_,
                                           bool is_gelu, bool save_pre_act, int heuristic) {

    int64_t batch_size = input.size(0);
    int64_t in_features = input.size(1);
    int64_t out_features = weight.size(0);

    TORCH_CHECK(input.dtype() == torch::kFloat16 || input.dtype() == torch::kBFloat16);
    TORCH_CHECK(input.dtype() == weight.dtype());
    TORCH_CHECK(input.is_cuda());
    TORCH_CHECK(weight.is_cuda());
    TORCH_CHECK(input.is_contiguous());
    TORCH_CHECK(weight.is_contiguous());
    CHECK_SHAPE(input, batch_size, in_features);
    CHECK_SHAPE(weight, out_features, in_features);
    if (bias_.has_value()) {
        auto bias = bias_.value();
        TORCH_CHECK(bias.dtype() == input.dtype());
        TORCH_CHECK(bias.is_cuda());
        TORCH_CHECK(bias.is_contiguous());
        CHECK_SHAPE(bias, out_features);
    }

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{input.device()};

    // create output/workspace tensor
    auto opts = input.options();
    auto output = at::empty({batch_size, out_features}, opts);
    at::Tensor pre_act;
    // If ReLU, cuBlasLT stores a bit-mask (1 bit per element)
    if (save_pre_act) { pre_act = at::empty({batch_size, is_gelu ? out_features : out_features / 8},
                                            is_gelu ? opts : opts.dtype(torch::kUInt8)); }
    // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind setting this to 1M.
    // However, Apex sets it to 4M and TransformerEngine sets to 32M for Hopper and 4M for other GPUs
    // https://github.com/NVIDIA/TransformerEngine/blob/a0f0065498bbcfc1da78cf9e8b166f5381613fbc/transformer_engine/pytorch/module.py#L91
    size_t workspaceSize = 1024 * 1024 * (at::cuda::getCurrentDeviceProperties()->major >= 9 ? 32 : 4);
    auto lt_workspace = at::empty({static_cast<int64_t>(workspaceSize)}, opts.dtype(torch::kUInt8));

    DISPATCH_HALF_AND_BF16(input.scalar_type(), "linear_act_forward", [&] {
        auto result = linear_act_forward_cuda<scalar_t>(
            input.data_ptr<scalar_t>(),
            weight.data_ptr<scalar_t>(),
            bias_.has_value()? bias_.value().data_ptr<scalar_t>() : nullptr,
            in_features,
            batch_size,
            out_features,
            is_gelu,
            heuristic,
            output.data_ptr<scalar_t>(),
            save_pre_act ? pre_act.data_ptr() : nullptr,
            (void*) (lt_workspace.data_ptr()),
            workspaceSize);
        TORCH_CHECK(result == 0, "linear_act_forward failed.");
    });

    std::vector<at::Tensor> result = {output};
    if (save_pre_act) { result.push_back(pre_act); };
    return result;
}
|
| 153 |
+
|
| 154 |
+
// Host-side wrapper for the fused backward kernel computing the input
// gradient through the activation plus the (pre-linear) bias gradient
// (bias_act_linear_dgrad_bgrad_cuda, defined in fused_dense_cuda.cu).
// weight:   (out_features, in_features)  fp16/bf16, contiguous, on CUDA.
// d_output: (batch_size, out_features)   gradient, same dtype.
// pre_act:  activation state saved by linear_act_forward — same dtype for
//           GELU, or a uint8 ReLU bit-mask (1 bit per element).
// Returns {d_input, d_bias}.
std::vector<at::Tensor> bias_act_linear_dgrad_bgrad(
    at::Tensor weight, at::Tensor d_output, at::Tensor pre_act, bool is_gelu, int heuristic
) {

    int64_t batch_size = d_output.size(0);
    int64_t out_features = d_output.size(1);
    int64_t in_features = weight.size(1);

    TORCH_CHECK(weight.dtype() == torch::kFloat16 || weight.dtype() == torch::kBFloat16);
    TORCH_CHECK(weight.dtype() == d_output.dtype());
    TORCH_CHECK(is_gelu ? (pre_act.dtype() == weight.dtype()) : (pre_act.dtype() == torch::kUInt8));
    TORCH_CHECK(weight.is_cuda());
    TORCH_CHECK(d_output.is_cuda());
    TORCH_CHECK(pre_act.is_cuda());
    TORCH_CHECK(weight.is_contiguous());
    TORCH_CHECK(d_output.is_contiguous());
    TORCH_CHECK(pre_act.is_contiguous());
    CHECK_SHAPE(weight, out_features, in_features);
    CHECK_SHAPE(d_output, batch_size, out_features);
    // If ReLU, cuBlasLT stores a bit-mask (1 bit per element)
    CHECK_SHAPE(pre_act, batch_size, is_gelu ? in_features : in_features / 8);

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{weight.device()};

    // create output/workspace tensor
    auto opts = weight.options();
    auto d_bias = at::empty({in_features}, opts);
    auto d_input = at::empty({batch_size, in_features}, opts);
    // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind setting this to 1M.
    // However, Apex sets it to 4M and TransformerEngine sets to 32M for Hopper and 4M for other GPUs
    // https://github.com/NVIDIA/TransformerEngine/blob/a0f0065498bbcfc1da78cf9e8b166f5381613fbc/transformer_engine/pytorch/module.py#L91
    size_t workspaceSize = 1024 * 1024 * (at::cuda::getCurrentDeviceProperties()->major >= 9 ? 32 : 4);
    auto lt_workspace = at::empty({static_cast<int64_t>(workspaceSize)}, opts.dtype(torch::kUInt8));

    DISPATCH_HALF_AND_BF16(weight.scalar_type(), "bias_act_linear_dgrad_bgrad", [&] {
        auto result = bias_act_linear_dgrad_bgrad_cuda<scalar_t>(
            weight.data_ptr<scalar_t>(),
            d_output.data_ptr<scalar_t>(),
            pre_act.data_ptr(),
            in_features,
            batch_size,
            out_features,
            is_gelu,
            heuristic,
            d_input.data_ptr<scalar_t>(),
            d_bias.data_ptr<scalar_t>(),
            (void*) (lt_workspace.data_ptr()),
            workspaceSize);
        TORCH_CHECK(result == 0, "bias_act_linear_dgrad_bgrad failed.");
    });

    return {d_input, d_bias};
}
|
| 208 |
+
|
| 209 |
+
// Python bindings: exposes the three cuBLASLt-backed fused ops to PyTorch.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("linear_bias_wgrad", &linear_bias_wgrad, "linear bias wgrad");
    m.def("linear_act_forward", &linear_act_forward, "linear gelu/relu forward");
    m.def("bias_act_linear_dgrad_bgrad", &bias_act_linear_dgrad_bgrad, "bias gelu/relu linear dgrad bgrad");
}
|
Code/Baselines/flash-attention/csrc/fused_dense_lib/setup.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import subprocess
|
| 3 |
+
from packaging.version import parse, Version
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from setuptools import setup
|
| 7 |
+
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def get_cuda_bare_metal_version(cuda_dir):
    """Run ``nvcc -V`` from *cuda_dir* and return (raw output, parsed release version)."""
    nvcc_output = subprocess.check_output(
        [f"{cuda_dir}/bin/nvcc", "-V"], universal_newlines=True
    )
    tokens = nvcc_output.split()
    # The release number follows the literal token "release" and ends at a comma.
    release_token = tokens[tokens.index("release") + 1]
    return nvcc_output, parse(release_token.split(",")[0])
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def append_nvcc_threads(nvcc_extra_args):
    """Return *nvcc_extra_args* plus ``--threads`` when nvcc is new enough (>= 11.2)."""
    _, nvcc_version = get_cuda_bare_metal_version(CUDA_HOME)
    if nvcc_version < Version("11.2"):
        return nvcc_extra_args
    # NVCC_THREADS overrides the default of 4 parallel compile threads.
    thread_count = os.getenv("NVCC_THREADS") or "4"
    return [*nvcc_extra_args, "--threads", thread_count]
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Build configuration: a single CUDA extension combining the C++ binding file
# and the cuBLASLt kernel implementation.
setup(
    name='fused_dense_lib',
    ext_modules=[
        CUDAExtension(
            name='fused_dense_lib',
            sources=['fused_dense.cpp', 'fused_dense_cuda.cu'],
            extra_compile_args={
                'cxx': ['-O3',],
                # append_nvcc_threads adds --threads on CUDA >= 11.2 to speed up nvcc.
                'nvcc': append_nvcc_threads(['-O3'])
            }
        )
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
|
| 42 |
+
|
Code/Baselines/flash-attention/csrc/fused_softmax/fused_softmax.cpp
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#include <cuda_fp16.h>
|
| 18 |
+
#include <torch/extension.h>
|
| 19 |
+
#include <vector>
|
| 20 |
+
|
| 21 |
+
namespace multihead_attn {
|
| 22 |
+
namespace fused_softmax {
|
| 23 |
+
namespace scaled_masked_softmax {
|
| 24 |
+
|
| 25 |
+
torch::Tensor fwd_cuda(
|
| 26 |
+
torch::Tensor const& input,
|
| 27 |
+
torch::Tensor const& mask,
|
| 28 |
+
float scale_factor);
|
| 29 |
+
|
| 30 |
+
torch::Tensor bwd_cuda(
|
| 31 |
+
torch::Tensor const& output_grads,
|
| 32 |
+
torch::Tensor const& softmax_results,
|
| 33 |
+
float scale_factor);
|
| 34 |
+
|
| 35 |
+
int get_batch_per_block_cuda(
|
| 36 |
+
int query_seq_len,
|
| 37 |
+
int key_seq_len,
|
| 38 |
+
int batches,
|
| 39 |
+
int attn_heads);
|
| 40 |
+
|
| 41 |
+
// Scaled masked softmax forward: validates 4D fp16/bf16 input and a 4D mask,
// then launches the CUDA implementation (fwd_cuda).
torch::Tensor fwd(
    torch::Tensor const& input,
    torch::Tensor const& mask,
    float scale_factor) {
  AT_ASSERTM(input.dim() == 4, "expected 4D tensor");
  AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
             (input.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM(mask.dim() == 4, "expected 4D tensor");

  return fwd_cuda(input, mask, scale_factor);
}
|
| 53 |
+
|
| 54 |
+
// Scaled masked softmax backward: validates shapes/dtypes and dispatches to
// the CUDA implementation (bwd_cuda).
// output_grads:    gradient w.r.t. the softmax output, 4D, fp16 or bf16.
// softmax_results: softmax output saved by the forward pass, 4D, same dtype.
// scale_factor:    the same scale used in the forward pass.
torch::Tensor bwd(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor) {

  // Fix: the checks require 4D tensors, but the original messages claimed
  // "expected 3D tensor", which misreported the actual requirement on failure.
  AT_ASSERTM(output_grads.dim() == 4, "expected 4D tensor");
  AT_ASSERTM(softmax_results.dim() == 4, "expected 4D tensor");

  AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
             (output_grads.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
             (softmax_results.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}
|
| 71 |
+
|
| 72 |
+
// Thin host-side wrapper so Python can query the kernel's batches-per-block
// launch parameter without running the softmax itself.
int get_batch_per_block(
    int query_seq_len,
    int key_seq_len,
    int batches,
    int attn_heads) {
  return get_batch_per_block_cuda(query_seq_len, key_seq_len, batches, attn_heads);
}
|
| 79 |
+
|
| 80 |
+
} // end namespace scaled_masked_softmax
|
| 81 |
+
} // end namespace fused_softmax
|
| 82 |
+
} // end namespace multihead_attn
|
| 83 |
+
|
| 84 |
+
namespace multihead_attn {
|
| 85 |
+
namespace fused_softmax {
|
| 86 |
+
namespace scaled_upper_triang_masked_softmax {
|
| 87 |
+
|
| 88 |
+
torch::Tensor fwd_cuda(
|
| 89 |
+
torch::Tensor const& input,
|
| 90 |
+
float scale_factor);
|
| 91 |
+
|
| 92 |
+
torch::Tensor bwd_cuda(
|
| 93 |
+
torch::Tensor const& output_grads,
|
| 94 |
+
torch::Tensor const& softmax_results,
|
| 95 |
+
float scale_factor);
|
| 96 |
+
|
| 97 |
+
// Upper-triangular (causal) masked scaled softmax forward: validates a 3D
// fp16/bf16 input and launches the CUDA implementation (fwd_cuda).
torch::Tensor fwd(torch::Tensor const& input, float scale_factor) {
  AT_ASSERTM(input.dim() == 3, "expected 3D tensor");
  AT_ASSERTM((input.scalar_type() == at::ScalarType::Half) ||
             (input.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return fwd_cuda(input, scale_factor);
}
|
| 105 |
+
|
| 106 |
+
// Upper-triangular (causal) masked scaled softmax backward: validates the 3D
// fp16/bf16 gradient and saved softmax output, then launches bwd_cuda.
torch::Tensor bwd(
    torch::Tensor const& output_grads,
    torch::Tensor const& softmax_results,
    float scale_factor) {

  AT_ASSERTM(output_grads.dim() == 3, "expected 3D tensor");
  AT_ASSERTM(softmax_results.dim() == 3, "expected 3D tensor");

  AT_ASSERTM((output_grads.scalar_type() == at::ScalarType::Half) ||
             (output_grads.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");
  AT_ASSERTM((softmax_results.scalar_type() == at::ScalarType::Half) ||
             (softmax_results.scalar_type() == at::ScalarType::BFloat16),
             "Only fp16 and bf16 are supported");

  return bwd_cuda(output_grads, softmax_results, scale_factor);
}
|
| 123 |
+
|
| 124 |
+
} // end namespace scaled_upper_triang_masked_softmax
|
| 125 |
+
} // end namespace fused_softmax
|
| 126 |
+
} // end namespace multihead_attn
|
| 127 |
+
|
| 128 |
+
// Python bindings for the two fused softmax variants (dense mask and
// upper-triangular/causal mask) plus the launch-parameter query helper.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("scaled_masked_softmax_forward",
        &multihead_attn::fused_softmax::scaled_masked_softmax::fwd,
        "Self Multihead Attention scaled, time masked softmax -- Forward.");

  m.def("scaled_masked_softmax_backward",
        &multihead_attn::fused_softmax::scaled_masked_softmax::bwd,
        "Self Multihead Attention scaled, time masked softmax -- Backward.");

  m.def("scaled_masked_softmax_get_batch_per_block",
        &multihead_attn::fused_softmax::scaled_masked_softmax::get_batch_per_block,
        "Return Batch per block size."
        );

  m.def("scaled_upper_triang_masked_softmax_forward",
        &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::fwd,
        "Self Multihead Attention scaled, time masked softmax -- Forward.");
  m.def("scaled_upper_triang_masked_softmax_backward",
        &multihead_attn::fused_softmax::scaled_upper_triang_masked_softmax::bwd,
        "Self Multihead Attention scaled, time masked softmax -- Backward.");
}
|
Code/Baselines/flash-attention/csrc/fused_softmax/scaled_masked_softmax.h
ADDED
|
@@ -0,0 +1,528 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#pragma once
|
| 18 |
+
|
| 19 |
+
#include <assert.h>
|
| 20 |
+
#include <cuda_fp16.h>
|
| 21 |
+
#include <cfloat>
|
| 22 |
+
#include <limits>
|
| 23 |
+
#include <stdint.h>
|
| 24 |
+
#include <cuda_fp16.h>
|
| 25 |
+
#include <c10/macros/Macros.h>
|
| 26 |
+
|
| 27 |
+
namespace {
|
| 28 |
+
|
| 29 |
+
// Copies ELEMENTS_PER_LDG contiguous elements from src to dst with a single
// vectorized access where a specialization exists; only the specializations
// below are defined.
template <typename Datatype, int ELEMENTS_PER_LDG>
__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src);

template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 1>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; }

// 4 x bf16 = 8 bytes, moved as one float2 load/store.
template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 4>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); }

template <>
__device__ __inline__ void copy_vector<c10::Half, 1>(c10::Half *dst, const c10::Half *src) { *dst = *src; }

// 4 x half = 8 bytes, moved as one float2 load/store.
template <>
__device__ __inline__ void copy_vector<c10::Half, 4>(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); }

template <>
__device__ __inline__ void copy_vector<uint8_t, 1>(uint8_t *dst, const uint8_t *src) { *dst = *src; }

// 4 x uint8_t = 4 bytes, moved as one half2 (also 4 bytes wide).
template <>
__device__ __inline__ void copy_vector<uint8_t, 4>(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); }
|
| 49 |
+
|
| 50 |
+
int log2_ceil(int value) {
|
| 51 |
+
int log2_value = 0;
|
| 52 |
+
while ((1 << log2_value) < value) ++log2_value;
|
| 53 |
+
return log2_value;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
template<typename T>
|
| 57 |
+
struct Add {
|
| 58 |
+
__device__ __forceinline__ T operator()(T a, T b) const {
|
| 59 |
+
return a + b;
|
| 60 |
+
}
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
template<typename T>
|
| 64 |
+
struct Max {
|
| 65 |
+
__device__ __forceinline__ T operator()(T a, T b) const {
|
| 66 |
+
return a < b ? b : a;
|
| 67 |
+
}
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
template <typename T>
|
| 71 |
+
__device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
|
| 72 |
+
{
|
| 73 |
+
#if CUDA_VERSION >= 9000
|
| 74 |
+
return __shfl_xor_sync(mask, value, laneMask, width);
|
| 75 |
+
#else
|
| 76 |
+
return __shfl_xor(value, laneMask, width);
|
| 77 |
+
#endif
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
|
| 81 |
+
__device__ __forceinline__ void warp_reduce(acc_t* sum) {
|
| 82 |
+
ReduceOp<acc_t> r;
|
| 83 |
+
#pragma unroll
|
| 84 |
+
for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
|
| 85 |
+
#pragma unroll
|
| 86 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 87 |
+
acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE);
|
| 88 |
+
sum[i] = r(sum[i], b);
|
| 89 |
+
}
|
| 90 |
+
}
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
/*
|
| 94 |
+
* Extended softmax (from native aten pytorch) with following additional features
|
| 95 |
+
* 1) input scaling
|
| 96 |
+
* 2) Explicit masking
|
| 97 |
+
*/
|
| 98 |
+
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
|
| 99 |
+
__global__ void scaled_masked_softmax_warp_forward(
|
| 100 |
+
output_t *dst,
|
| 101 |
+
const input_t *src,
|
| 102 |
+
const uint8_t *mask,
|
| 103 |
+
const acc_t scale,
|
| 104 |
+
int micro_batch_size,
|
| 105 |
+
int element_count,
|
| 106 |
+
int pad_batches)
|
| 107 |
+
{
|
| 108 |
+
// WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
|
| 109 |
+
// warp_size of method warp_softmax_forward_kernel.
|
| 110 |
+
constexpr int next_power_of_two = 1 << log2_elements;
|
| 111 |
+
constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
|
| 112 |
+
constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
|
| 113 |
+
constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
|
| 114 |
+
constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;
|
| 115 |
+
|
| 116 |
+
// blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
|
| 117 |
+
// gridDim/blockIdx = (seq_len, attn_heads, batches)
|
| 118 |
+
int first_batch = (blockDim.y * (blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z))+ threadIdx.y) * WARP_BATCH;
|
| 119 |
+
int pad_first_batch = 0;
|
| 120 |
+
if (pad_batches != 1) { // bert style
|
| 121 |
+
pad_first_batch = (blockDim.y * (blockIdx.x + gridDim.x * blockIdx.z) + threadIdx.y) * WARP_BATCH;
|
| 122 |
+
} else { // gpt2 style
|
| 123 |
+
pad_first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
// micro_batch_size might not be a multiple of WARP_BATCH. Check how
|
| 127 |
+
// many batches have to computed within this WARP.
|
| 128 |
+
int local_batches = micro_batch_size - first_batch;
|
| 129 |
+
if (local_batches > WARP_BATCH)
|
| 130 |
+
local_batches = WARP_BATCH;
|
| 131 |
+
|
| 132 |
+
// there might be multiple batches per warp. compute the index within the batch
|
| 133 |
+
int local_idx = threadIdx.x;
|
| 134 |
+
|
| 135 |
+
src += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
|
| 136 |
+
dst += first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
|
| 137 |
+
mask += pad_first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
|
| 138 |
+
|
| 139 |
+
// load data from global memory
|
| 140 |
+
acc_t elements[WARP_BATCH][WARP_ITERATIONS];
|
| 141 |
+
input_t temp_data[ELEMENTS_PER_LDG_STG];
|
| 142 |
+
uint8_t temp_mask[ELEMENTS_PER_LDG_STG];
|
| 143 |
+
#pragma unroll
|
| 144 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 145 |
+
int batch_element_count = (i >= local_batches) ? 0 : element_count;
|
| 146 |
+
|
| 147 |
+
#pragma unroll
|
| 148 |
+
for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
|
| 149 |
+
int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
|
| 150 |
+
|
| 151 |
+
if (element_index < batch_element_count) {
|
| 152 |
+
int itr_idx = i*element_count+it*WARP_SIZE;
|
| 153 |
+
copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_data, src + itr_idx);
|
| 154 |
+
copy_vector<uint8_t, ELEMENTS_PER_LDG_STG>(temp_mask, mask + itr_idx);
|
| 155 |
+
|
| 156 |
+
#pragma unroll
|
| 157 |
+
for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
|
| 158 |
+
if (temp_mask[element] != 1) {
|
| 159 |
+
elements[i][it + element] = (acc_t)temp_data[element] * scale;
|
| 160 |
+
} else {
|
| 161 |
+
elements[i][it + element] = -10000.0;
|
| 162 |
+
}
|
| 163 |
+
}
|
| 164 |
+
} else {
|
| 165 |
+
#pragma unroll
|
| 166 |
+
for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
|
| 167 |
+
elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
|
| 168 |
+
}
|
| 169 |
+
}
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
// compute max_value
|
| 174 |
+
acc_t max_value[WARP_BATCH];
|
| 175 |
+
#pragma unroll
|
| 176 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 177 |
+
max_value[i] = elements[i][0];
|
| 178 |
+
#pragma unroll
|
| 179 |
+
for (int it = 1; it < WARP_ITERATIONS; ++it) {
|
| 180 |
+
max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
|
| 181 |
+
}
|
| 182 |
+
}
|
| 183 |
+
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);
|
| 184 |
+
|
| 185 |
+
// compute scale value to account for full mask
|
| 186 |
+
acc_t scale_value[WARP_BATCH];
|
| 187 |
+
#pragma unroll
|
| 188 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 189 |
+
scale_value[i] = (max_value[i] == -10000.0) ? 0.0 : 1.0;
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
acc_t sum[WARP_BATCH] { 0.0f };
|
| 193 |
+
#pragma unroll
|
| 194 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 195 |
+
#pragma unroll
|
| 196 |
+
for (int it = 0; it < WARP_ITERATIONS; ++it) {
|
| 197 |
+
elements[i][it] = std::exp((elements[i][it] - max_value[i]));
|
| 198 |
+
sum[i] += elements[i][it];
|
| 199 |
+
}
|
| 200 |
+
}
|
| 201 |
+
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
|
| 202 |
+
|
| 203 |
+
// store result
|
| 204 |
+
output_t out[ELEMENTS_PER_LDG_STG];
|
| 205 |
+
#pragma unroll
|
| 206 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 207 |
+
if (i >= local_batches)
|
| 208 |
+
break;
|
| 209 |
+
#pragma unroll
|
| 210 |
+
for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
|
| 211 |
+
int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
|
| 212 |
+
if (element_index < element_count) {
|
| 213 |
+
#pragma unroll
|
| 214 |
+
for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
|
| 215 |
+
out[element] = elements[i][it + element] * scale_value[i]/ sum[i];
|
| 216 |
+
}
|
| 217 |
+
copy_vector<output_t, ELEMENTS_PER_LDG_STG>(dst + i * element_count + it * WARP_SIZE, out);
|
| 218 |
+
} else {
|
| 219 |
+
break;
|
| 220 |
+
}
|
| 221 |
+
}
|
| 222 |
+
}
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
|
| 226 |
+
__global__ void scaled_masked_softmax_warp_backward(
|
| 227 |
+
output_t *gradInput,
|
| 228 |
+
input_t *grad,
|
| 229 |
+
const input_t *output,
|
| 230 |
+
acc_t scale,
|
| 231 |
+
int micro_batch_size,
|
| 232 |
+
int element_count)
|
| 233 |
+
{
|
| 234 |
+
// WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
|
| 235 |
+
// warp_size of method warp_softmax_backward_kernel.
|
| 236 |
+
constexpr int next_power_of_two = 1 << log2_elements;
|
| 237 |
+
constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
|
| 238 |
+
constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
|
| 239 |
+
constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
|
| 240 |
+
constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;
|
| 241 |
+
|
| 242 |
+
// blockDim/threadIdx = (WARP_SIZE, WARPS_PER_BLOCK, )
|
| 243 |
+
// gridDim/blockIdx = (seq_len, attn_heads, batches)
|
| 244 |
+
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH;
|
| 245 |
+
|
| 246 |
+
// micro_batch_size might not be a multiple of WARP_BATCH. Check how
|
| 247 |
+
// many batches have to computed within this WARP.
|
| 248 |
+
int local_batches = micro_batch_size - first_batch;
|
| 249 |
+
if (local_batches > WARP_BATCH)
|
| 250 |
+
local_batches = WARP_BATCH;
|
| 251 |
+
|
| 252 |
+
// there might be multiple batches per warp. compute the index within the batch
|
| 253 |
+
int local_idx = threadIdx.x;
|
| 254 |
+
|
| 255 |
+
// the first element to process by the current thread
|
| 256 |
+
int thread_offset = first_batch * element_count + ELEMENTS_PER_LDG_STG * local_idx;
|
| 257 |
+
grad += thread_offset;
|
| 258 |
+
output += thread_offset;
|
| 259 |
+
gradInput += thread_offset;
|
| 260 |
+
|
| 261 |
+
// load data from global memory
|
| 262 |
+
acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f };
|
| 263 |
+
acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f };
|
| 264 |
+
input_t temp_grad[ELEMENTS_PER_LDG_STG];
|
| 265 |
+
input_t temp_output[ELEMENTS_PER_LDG_STG];
|
| 266 |
+
#pragma unroll
|
| 267 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 268 |
+
int batch_element_count = (i >= local_batches) ? 0 : element_count;
|
| 269 |
+
|
| 270 |
+
#pragma unroll
|
| 271 |
+
for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
|
| 272 |
+
int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
|
| 273 |
+
if (element_index < batch_element_count) {
|
| 274 |
+
copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_grad, grad + i * element_count + it * WARP_SIZE);
|
| 275 |
+
copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_output, output + i * element_count + it * WARP_SIZE);
|
| 276 |
+
|
| 277 |
+
#pragma unroll
|
| 278 |
+
for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
|
| 279 |
+
output_reg[i][it + element] = (acc_t)temp_output[element];
|
| 280 |
+
}
|
| 281 |
+
#pragma unroll
|
| 282 |
+
for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
|
| 283 |
+
grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element];
|
| 284 |
+
}
|
| 285 |
+
}
|
| 286 |
+
}
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
acc_t sum[WARP_BATCH];
|
| 290 |
+
#pragma unroll
|
| 291 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 292 |
+
sum[i] = grad_reg[i][0];
|
| 293 |
+
#pragma unroll
|
| 294 |
+
for (int it = 1; it < WARP_ITERATIONS; ++it) {
|
| 295 |
+
sum[i] += grad_reg[i][it];
|
| 296 |
+
}
|
| 297 |
+
}
|
| 298 |
+
warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);
|
| 299 |
+
|
| 300 |
+
// store result
|
| 301 |
+
#pragma unroll
|
| 302 |
+
for (int i = 0; i < WARP_BATCH; ++i) {
|
| 303 |
+
if (i >= local_batches)
|
| 304 |
+
break;
|
| 305 |
+
#pragma unroll
|
| 306 |
+
for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
|
| 307 |
+
int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
|
| 308 |
+
if (element_index < element_count) {
|
| 309 |
+
// compute gradients
|
| 310 |
+
output_t out[ELEMENTS_PER_LDG_STG];
|
| 311 |
+
#pragma unroll
|
| 312 |
+
for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
|
| 313 |
+
out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i]));
|
| 314 |
+
}
|
| 315 |
+
copy_vector<output_t, ELEMENTS_PER_LDG_STG>(gradInput + i * element_count + it * WARP_SIZE, out);
|
| 316 |
+
}
|
| 317 |
+
}
|
| 318 |
+
}
|
| 319 |
+
}
|
| 320 |
+
} // end of anonymous namespace
|
| 321 |
+
|
| 322 |
+
int get_batch_per_block(int query_seq_len, int key_seq_len, int batches, int attn_heads){
|
| 323 |
+
int log2_elements = log2_ceil(key_seq_len);
|
| 324 |
+
const int next_power_of_two = 1 << log2_elements;
|
| 325 |
+
|
| 326 |
+
int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
|
| 327 |
+
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
|
| 328 |
+
|
| 329 |
+
constexpr int threads_per_block = 128;
|
| 330 |
+
int warps_per_block = (threads_per_block / warp_size);
|
| 331 |
+
int batches_per_block = warps_per_block * batches_per_warp;
|
| 332 |
+
|
| 333 |
+
return batches_per_block;
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
template<typename input_t, typename output_t, typename acc_t>
|
| 337 |
+
void dispatch_scaled_masked_softmax_forward(
|
| 338 |
+
output_t *dst,
|
| 339 |
+
const input_t *src,
|
| 340 |
+
const uint8_t *mask,
|
| 341 |
+
const input_t scale,
|
| 342 |
+
int query_seq_len,
|
| 343 |
+
int key_seq_len,
|
| 344 |
+
int batches,
|
| 345 |
+
int attn_heads,
|
| 346 |
+
int pad_batches)
|
| 347 |
+
{
|
| 348 |
+
TORCH_INTERNAL_ASSERT(key_seq_len >= 0 && key_seq_len <= 8192 );
|
| 349 |
+
if (key_seq_len == 0) {
|
| 350 |
+
return;
|
| 351 |
+
} else {
|
| 352 |
+
int log2_elements = log2_ceil(key_seq_len);
|
| 353 |
+
const int next_power_of_two = 1 << log2_elements;
|
| 354 |
+
int batch_count = batches * attn_heads * query_seq_len;
|
| 355 |
+
|
| 356 |
+
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
|
| 357 |
+
int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
|
| 358 |
+
|
| 359 |
+
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
|
| 360 |
+
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
|
| 361 |
+
|
| 362 |
+
// use 128 threads per block to maximimize gpu utilization
|
| 363 |
+
constexpr int threads_per_block = 128;
|
| 364 |
+
|
| 365 |
+
int warps_per_block = (threads_per_block / warp_size);
|
| 366 |
+
int batches_per_block = warps_per_block * batches_per_warp;
|
| 367 |
+
TORCH_INTERNAL_ASSERT(query_seq_len%batches_per_block == 0);
|
| 368 |
+
dim3 blocks(query_seq_len/batches_per_block, attn_heads, batches);
|
| 369 |
+
dim3 threads(warp_size, warps_per_block, 1);
|
| 370 |
+
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
|
| 371 |
+
switch (log2_elements) {
|
| 372 |
+
case 0: // 1
|
| 373 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 0>
|
| 374 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 375 |
+
break;
|
| 376 |
+
case 1: // 2
|
| 377 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 1>
|
| 378 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 379 |
+
break;
|
| 380 |
+
case 2: // 4
|
| 381 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 2>
|
| 382 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 383 |
+
break;
|
| 384 |
+
case 3: // 8
|
| 385 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 3>
|
| 386 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 387 |
+
break;
|
| 388 |
+
case 4: // 16
|
| 389 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 4>
|
| 390 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 391 |
+
break;
|
| 392 |
+
case 5: // 32
|
| 393 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 5>
|
| 394 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 395 |
+
break;
|
| 396 |
+
case 6: // 64
|
| 397 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 6>
|
| 398 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 399 |
+
break;
|
| 400 |
+
case 7: // 128
|
| 401 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 7>
|
| 402 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 403 |
+
break;
|
| 404 |
+
case 8: // 256
|
| 405 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 8>
|
| 406 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 407 |
+
break;
|
| 408 |
+
case 9: // 512
|
| 409 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 9>
|
| 410 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 411 |
+
break;
|
| 412 |
+
case 10: // 1024
|
| 413 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 10>
|
| 414 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 415 |
+
break;
|
| 416 |
+
case 11: // 2048
|
| 417 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 11>
|
| 418 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 419 |
+
break;
|
| 420 |
+
case 12: // 4096
|
| 421 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 12>
|
| 422 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 423 |
+
break;
|
| 424 |
+
case 13: // 8192
|
| 425 |
+
scaled_masked_softmax_warp_forward<input_t, output_t, acc_t, 13>
|
| 426 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(dst, src, mask, scale, batch_count, key_seq_len, pad_batches);
|
| 427 |
+
break;
|
| 428 |
+
default:
|
| 429 |
+
break;
|
| 430 |
+
}
|
| 431 |
+
}
|
| 432 |
+
}
|
| 433 |
+
|
| 434 |
+
template<typename input_t, typename output_t, typename acc_t>
|
| 435 |
+
void dispatch_scaled_masked_softmax_backward(
|
| 436 |
+
output_t *grad_input,
|
| 437 |
+
input_t *grad,
|
| 438 |
+
const input_t *output,
|
| 439 |
+
const acc_t scale,
|
| 440 |
+
int query_seq_len,
|
| 441 |
+
int key_seq_len,
|
| 442 |
+
int batches,
|
| 443 |
+
int attn_heads)
|
| 444 |
+
{
|
| 445 |
+
TORCH_INTERNAL_ASSERT( key_seq_len >= 0 && key_seq_len <= 8192 );
|
| 446 |
+
if (key_seq_len == 0) {
|
| 447 |
+
return;
|
| 448 |
+
} else {
|
| 449 |
+
int log2_elements = log2_ceil(key_seq_len);
|
| 450 |
+
const int next_power_of_two = 1 << log2_elements;
|
| 451 |
+
int batch_count = batches * attn_heads * query_seq_len;
|
| 452 |
+
|
| 453 |
+
// This value must match the WARP_SIZE constexpr value computed inside softmax_warp_backward.
|
| 454 |
+
int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
|
| 455 |
+
|
| 456 |
+
// This value must match the WARP_BATCH constexpr value computed inside softmax_warp_backward.
|
| 457 |
+
int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;
|
| 458 |
+
|
| 459 |
+
// use 128 threads per block to maximimize gpu utilization
|
| 460 |
+
constexpr int threads_per_block = 128;
|
| 461 |
+
|
| 462 |
+
int warps_per_block = (threads_per_block / warp_size);
|
| 463 |
+
int batches_per_block = warps_per_block * batches_per_warp;
|
| 464 |
+
int blocks = batch_count/batches_per_block;
|
| 465 |
+
dim3 threads(warp_size, warps_per_block, 1);
|
| 466 |
+
// Launch code would be more elegant if C++ supported FOR CONSTEXPR
|
| 467 |
+
switch (log2_elements) {
|
| 468 |
+
case 0: // 1
|
| 469 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 0>
|
| 470 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 471 |
+
break;
|
| 472 |
+
case 1: // 2
|
| 473 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 1>
|
| 474 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 475 |
+
break;
|
| 476 |
+
case 2: // 4
|
| 477 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 2>
|
| 478 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 479 |
+
break;
|
| 480 |
+
case 3: // 8
|
| 481 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 3>
|
| 482 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 483 |
+
break;
|
| 484 |
+
case 4: // 16
|
| 485 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 4>
|
| 486 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 487 |
+
break;
|
| 488 |
+
case 5: // 32
|
| 489 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 5>
|
| 490 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 491 |
+
break;
|
| 492 |
+
case 6: // 64
|
| 493 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 6>
|
| 494 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 495 |
+
break;
|
| 496 |
+
case 7: // 128
|
| 497 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 7>
|
| 498 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 499 |
+
break;
|
| 500 |
+
case 8: // 256
|
| 501 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 8>
|
| 502 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 503 |
+
break;
|
| 504 |
+
case 9: // 512
|
| 505 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 9>
|
| 506 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 507 |
+
break;
|
| 508 |
+
case 10: // 1024
|
| 509 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 10>
|
| 510 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 511 |
+
break;
|
| 512 |
+
case 11: // 2048
|
| 513 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 11>
|
| 514 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 515 |
+
break;
|
| 516 |
+
case 12: // 4096
|
| 517 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 12>
|
| 518 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 519 |
+
break;
|
| 520 |
+
case 13: // 8192
|
| 521 |
+
scaled_masked_softmax_warp_backward<input_t, output_t, acc_t, 13>
|
| 522 |
+
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(grad_input, grad, output, scale, batch_count, key_seq_len);
|
| 523 |
+
break;
|
| 524 |
+
default:
|
| 525 |
+
break;
|
| 526 |
+
}
|
| 527 |
+
}
|
| 528 |
+
}
|
Code/Baselines/flash-attention/csrc/fused_softmax/scaled_masked_softmax_cuda.cu
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#include <ATen/ATen.h>
|
| 18 |
+
#include <cuda.h>
|
| 19 |
+
#include <cuda_runtime.h>
|
| 20 |
+
#include <cuda_fp16.h>
|
| 21 |
+
#include <cuda_profiler_api.h>
|
| 22 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 23 |
+
#include <torch/extension.h>
|
| 24 |
+
#include "scaled_masked_softmax.h"
|
| 25 |
+
#include "type_shim.h"
|
| 26 |
+
|
| 27 |
+
namespace multihead_attn {
|
| 28 |
+
namespace fused_softmax {
|
| 29 |
+
namespace scaled_masked_softmax {
|
| 30 |
+
|
| 31 |
+
int get_batch_per_block_cuda(int query_seq_len, int key_seq_len, int batches, int attn_heads){
|
| 32 |
+
return get_batch_per_block(query_seq_len, key_seq_len, batches, attn_heads);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
torch::Tensor fwd_cuda(
|
| 37 |
+
torch::Tensor const& input,
|
| 38 |
+
torch::Tensor const& mask,
|
| 39 |
+
float scale_factor)
|
| 40 |
+
{
|
| 41 |
+
// input is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len]
|
| 42 |
+
const int batches = input.size(0);
|
| 43 |
+
const int pad_batches = mask.size(0);
|
| 44 |
+
const int attn_heads = input.size(1);
|
| 45 |
+
const int query_seq_len = input.size(2);
|
| 46 |
+
const int key_seq_len = input.size(3);
|
| 47 |
+
TORCH_INTERNAL_ASSERT(key_seq_len <= 8192);
|
| 48 |
+
TORCH_INTERNAL_ASSERT(query_seq_len > 1);
|
| 49 |
+
TORCH_INTERNAL_ASSERT(pad_batches == 1 || pad_batches == batches);
|
| 50 |
+
TORCH_INTERNAL_ASSERT(mask.size(1) == 1);
|
| 51 |
+
TORCH_INTERNAL_ASSERT(mask.size(2) == query_seq_len);
|
| 52 |
+
TORCH_INTERNAL_ASSERT(mask.size(3) == key_seq_len);
|
| 53 |
+
|
| 54 |
+
// Output
|
| 55 |
+
auto act_options = input.options().requires_grad(false);
|
| 56 |
+
torch::Tensor softmax_results =
|
| 57 |
+
torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);
|
| 58 |
+
|
| 59 |
+
// Softmax Intermediate Result Ptr
|
| 60 |
+
void* input_ptr = static_cast<void*>(input.data_ptr());
|
| 61 |
+
void* mask_ptr = static_cast<void*>(mask.data_ptr());
|
| 62 |
+
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
|
| 63 |
+
|
| 64 |
+
DISPATCH_HALF_AND_BFLOAT(
|
| 65 |
+
input.scalar_type(),
|
| 66 |
+
"dispatch_scaled_masked_softmax_forward",
|
| 67 |
+
dispatch_scaled_masked_softmax_forward<scalar_t, scalar_t, float>(
|
| 68 |
+
reinterpret_cast<scalar_t*>(softmax_results_ptr),
|
| 69 |
+
reinterpret_cast<const scalar_t*>(input_ptr),
|
| 70 |
+
reinterpret_cast<const uint8_t*>(mask_ptr),
|
| 71 |
+
scale_factor,
|
| 72 |
+
query_seq_len,
|
| 73 |
+
key_seq_len,
|
| 74 |
+
batches,
|
| 75 |
+
attn_heads,
|
| 76 |
+
pad_batches
|
| 77 |
+
);
|
| 78 |
+
);
|
| 79 |
+
return softmax_results;
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
torch::Tensor bwd_cuda(
|
| 83 |
+
torch::Tensor const& output_grads_,
|
| 84 |
+
torch::Tensor const& softmax_results_,
|
| 85 |
+
float scale_factor) {
|
| 86 |
+
|
| 87 |
+
auto output_grads = output_grads_.contiguous();
|
| 88 |
+
auto softmax_results = softmax_results_.contiguous();
|
| 89 |
+
|
| 90 |
+
//output grads is a 4d tensor with dimensions [batches, attn_heads, seq_len, seq_len]
|
| 91 |
+
const int batches = output_grads.size(0);
|
| 92 |
+
const int attn_heads = output_grads.size(1);
|
| 93 |
+
const int query_seq_len = output_grads.size(2);
|
| 94 |
+
const int key_seq_len = output_grads.size(3);
|
| 95 |
+
|
| 96 |
+
auto act_options = output_grads.options().requires_grad(false);
|
| 97 |
+
torch::Tensor input_grads =
|
| 98 |
+
torch::empty({batches, attn_heads, query_seq_len, key_seq_len}, act_options);
|
| 99 |
+
void* input_grads_ptr = static_cast<void*>(input_grads.data_ptr());
|
| 100 |
+
void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());
|
| 101 |
+
|
| 102 |
+
//Softmax Grad
|
| 103 |
+
DISPATCH_HALF_AND_BFLOAT(
|
| 104 |
+
output_grads_.scalar_type(),
|
| 105 |
+
"dispatch_scaled_masked_softmax_backward",
|
| 106 |
+
dispatch_scaled_masked_softmax_backward<scalar_t, scalar_t, float>(
|
| 107 |
+
reinterpret_cast<scalar_t*>(input_grads_ptr),
|
| 108 |
+
reinterpret_cast<scalar_t*>(output_grads_ptr),
|
| 109 |
+
reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
|
| 110 |
+
scale_factor,
|
| 111 |
+
query_seq_len,
|
| 112 |
+
key_seq_len,
|
| 113 |
+
batches,
|
| 114 |
+
attn_heads
|
| 115 |
+
);
|
| 116 |
+
);
|
| 117 |
+
return input_grads;
|
| 118 |
+
}
|
| 119 |
+
}
|
| 120 |
+
}
|
| 121 |
+
}
|
Code/Baselines/flash-attention/csrc/fused_softmax/scaled_upper_triang_masked_softmax.h
ADDED
|
@@ -0,0 +1,529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#pragma once
|
| 18 |
+
|
| 19 |
+
#include <assert.h>
|
| 20 |
+
#include <cuda_fp16.h>
|
| 21 |
+
#include <cfloat>
|
| 22 |
+
#include <limits>
|
| 23 |
+
#include <stdint.h>
|
| 24 |
+
#include <c10/macros/Macros.h>
|
| 25 |
+
|
| 26 |
+
namespace {
|
| 27 |
+
|
| 28 |
+
// Vectorized element copy: moves ELEMENTS_PER_LDG contiguous elements from
// src to dst in a single load/store where a specialization provides one.
// Only the explicit specializations below exist; instantiating the primary
// template for any other (type, width) pair fails at link time.
template <typename Datatype, int ELEMENTS_PER_LDG>
__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src);

// Scalar copy of one bfloat16.
template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 1>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *dst = *src; }

// 4 x bfloat16 (8 bytes) moved as one float2 transaction.
// NOTE(review): pointer-cast type punning — relies on nvcc's permissive
// aliasing and on 8-byte alignment of dst/src; confirm callers guarantee it.
template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 4>(c10::BFloat16 *dst, const c10::BFloat16 *src) { *((float2*) dst) = *((float2*) src); }

// Scalar copy of one half.
template <>
__device__ __inline__ void copy_vector<c10::Half, 1>(c10::Half *dst, const c10::Half *src) { *dst = *src; }

// 4 x half (8 bytes) moved as one float2 transaction.
template <>
__device__ __inline__ void copy_vector<c10::Half, 4>(c10::Half *dst, const c10::Half *src) { *((float2*) dst) = *((float2*) src); }

// Scalar copy of one byte (used for mask data).
template <>
__device__ __inline__ void copy_vector<uint8_t, 1>(uint8_t *dst, const uint8_t *src) { *dst = *src; }

// 4 x uint8_t (4 bytes) moved as one half2 transaction.
template <>
__device__ __inline__ void copy_vector<uint8_t, 4>(uint8_t *dst, const uint8_t *src) {*((half2*) dst) = *((half2*) src); }

// Zero-fill counterpart of copy_vector: writes ELEMENTS_PER_LDG zeros to dst.
template <typename Datatype, int ELEMENTS_PER_LDG>
__device__ __inline__ void copy_zero_vector(Datatype *dst);

template <>
__device__ __inline__ void copy_zero_vector<c10::BFloat16, 1>(c10::BFloat16 *dst) { *dst = 0.0; }

template <>
__device__ __inline__ void copy_zero_vector<c10::BFloat16, 4>(c10::BFloat16 *dst) { *((float2*) dst) = make_float2(0.0f, 0.0f); }

template <>
__device__ __inline__ void copy_zero_vector<c10::Half, 1>(c10::Half *dst) { *dst = 0.0; }

template <>
__device__ __inline__ void copy_zero_vector<c10::Half, 4>(c10::Half *dst) { *((float2*) dst) = make_float2(0.0f, 0.0f); }
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
// Smallest exponent e with (1 << e) >= value, i.e. ceil(log2(value)).
// Values <= 1 yield 0. Host-side helper used by the dispatch functions to
// pick a kernel instantiation.
int log2_ceil(int value) {
    int exponent = 0;
    for (; (1 << exponent) < value; ++exponent)
        ;
    return exponent;
}
|
| 70 |
+
|
| 71 |
+
// Binary addition functor; passed as the ReduceOp of warp_reduce for sums.
template<typename T>
struct Add {
  // Returns a + b.
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a + b;
  }
};
|
| 77 |
+
|
| 78 |
+
// Binary maximum functor; passed as the ReduceOp of warp_reduce for maxima.
template<typename T>
struct Max {
  // Returns the larger of a and b (b wins ties).
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
  }
};
|
| 84 |
+
|
| 85 |
+
// Portability wrapper for the warp XOR shuffle: on CUDA 9+ uses the
// synchronized variant (__shfl_xor_sync, with an explicit participation
// mask defaulting to all lanes); on older toolkits falls back to the
// legacy __shfl_xor.
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
{
#if CUDA_VERSION >= 9000
    return __shfl_xor_sync(mask, value, laneMask, width);
#else
    return __shfl_xor(value, laneMask, width);
#endif
}
|
| 94 |
+
|
| 95 |
+
// Butterfly (XOR-shuffle) reduction across WARP_SIZE lanes.
// sum points to WARP_BATCH per-lane partial values; after the call every
// lane holds the ReduceOp-combination of its value with all other lanes'.
// Halving offsets (WARP_SIZE/2, ..., 1) give log2(WARP_SIZE) shuffle rounds.
template <typename acc_t, int WARP_BATCH, int WARP_SIZE, template<typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce(acc_t* sum) {
    ReduceOp<acc_t> r;
    #pragma unroll
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        #pragma unroll
        for (int i = 0; i < WARP_BATCH; ++i) {
            // Exchange with the lane whose id differs in bit `offset`.
            acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE);
            sum[i] = r(sum[i], b);
        }
    }
}
|
| 107 |
+
|
| 108 |
+
/*
|
| 109 |
+
* Extended softmax (from native aten pytorch) with following additional features
|
| 110 |
+
* 1) input scaling
|
| 111 |
+
* 2) Implicit time (diagonal masking)
|
| 112 |
+
*/
|
| 113 |
+
/*
 * Extended softmax (from native aten pytorch) with following additional features
 * 1) input scaling
 * 2) Implicit time (diagonal masking)
 *
 * One warp computes the softmax of WARP_BATCH rows of length element_count.
 * blockIdx.x is the row within the sequence; local_seq = blockIdx.x + 1 is the
 * number of unmasked (causal) columns in that row — columns at or beyond it are
 * treated as -inf and written out as 0.
 */
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void scaled_upper_triang_masked_softmax_warp_forward(
    output_t *dst,
    const input_t *src,
    const acc_t scale,
    int micro_batch_size,
    int stride,
    int element_count)
{
    // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
    // warp_size of method warp_softmax_forward_kernel.
    constexpr int next_power_of_two = 1 << log2_elements;
    constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
    // How many strided chunks each lane covers to span next_power_of_two columns.
    constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
    constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
    // Vector width of the global-memory loads/stores (see copy_vector).
    constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;

    int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x;
    // Causal limit: row r (= blockIdx.x) attends to columns [0, r].
    int local_seq = blockIdx.x + 1;
    // Number of iterations that can contain unmasked elements for this row.
    int warp_iteration_limit = (local_seq + ELEMENTS_PER_LDG_STG * WARP_SIZE - 1)/ WARP_SIZE;

    // micro_batch_size might not be a multiple of WARP_BATCH. Check how
    // many batches have to computed within this WARP.
    int local_batches = micro_batch_size - first_batch;
    if (local_batches > WARP_BATCH)
        local_batches = WARP_BATCH;

    // there might be multiple batches per warp. compute the index within the batch
    int local_idx = threadIdx.x;

    src += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx;
    dst += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx;

    // load data from global memory
    acc_t elements[WARP_BATCH][WARP_ITERATIONS];
    input_t temp_data[ELEMENTS_PER_LDG_STG];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        // Rows past the valid batch range load nothing (count 0 -> all -inf).
        int batch_element_count = (i >= local_batches) ? 0 : local_seq;

        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
            int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;

            if (element_index < batch_element_count) {
                copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_data, src + i*element_count*stride + it*WARP_SIZE);

                #pragma unroll
                for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
                    if ((element_index + element) < batch_element_count) {
                        // Unmasked element: apply the input scaling here.
                        elements[i][it+element] = (acc_t)temp_data[element] * scale;
                    } else {
                        // Masked (causal) element: -inf so exp() contributes 0.
                        elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
                    }
                }
            } else {
                #pragma unroll
                for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
                    elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
                }
            }
        }
    }

    // compute max_value
    acc_t max_value[WARP_BATCH];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        max_value[i] = elements[i][0];
        #pragma unroll
        for (int it = 1; it < WARP_ITERATIONS; ++it) {
            max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
        }
    }
    // Reduce the per-lane maxima across the warp (numerically stable softmax).
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);

    acc_t sum[WARP_BATCH] { 0.0f };
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; ++it) {
            // Iterations wholly past the causal limit are skipped entirely.
            if (it < warp_iteration_limit) {
                elements[i][it] = std::exp((elements[i][it] - max_value[i]));
                sum[i] += elements[i][it];
            }
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

    // store result
    output_t out[ELEMENTS_PER_LDG_STG];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        if (i >= local_batches)
            break;
        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
            int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;

            if (element_index < local_seq) {

                #pragma unroll
                for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
                    if (element_index + element < local_seq) {
                        // Normalized probability for an unmasked column.
                        out[element] = elements[i][it + element] / sum[i];
                    } else {
                        // Masked column inside a partially-valid vector: 0.
                        out[element] = 0;
                    }
                }
                copy_vector<output_t, ELEMENTS_PER_LDG_STG>(dst + i * element_count * stride + it * WARP_SIZE, out);
            } else if (element_index < element_count) {
                // Fully-masked vector inside the row: write zeros.
                copy_zero_vector<output_t, ELEMENTS_PER_LDG_STG>(dst + i * element_count * stride + it * WARP_SIZE);
            } else {
                break;
            }
        }
    }
}
|
| 231 |
+
|
| 232 |
+
/*
 * Backward of the causal softmax above. Given grad (dL/dy) and output (the
 * saved softmax probabilities y), each warp computes for its rows:
 *   gradInput = scale * (y * dL/dy - y * sum(y * dL/dy))
 * Masked columns were stored as 0 in the forward pass, so their registers
 * stay at the 0.0f initializer and contribute nothing.
 */
template <typename input_t, typename output_t, typename acc_t, int log2_elements>
__global__ void scaled_upper_triang_masked_softmax_warp_backward(
    output_t *gradInput,
    input_t *grad,
    const input_t *output,
    acc_t scale,
    int micro_batch_size,
    int stride,
    int element_count)
{
    // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
    // warp_size of method warp_softmax_backward_kernel.
    constexpr int next_power_of_two = 1 << log2_elements;
    constexpr int WARP_SIZE = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
    constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
    constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
    constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;

    int first_batch = (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH + blockIdx.x;
    // Causal limit for this row, as in the forward kernel.
    int local_seq = blockIdx.x + 1;

    // micro_batch_size might not be a multiple of WARP_BATCH. Check how
    // many batches have to computed within this WARP.
    int local_batches = micro_batch_size - first_batch;
    if (local_batches > WARP_BATCH)
        local_batches = WARP_BATCH;

    // there might be multiple batches per warp. compute the index within the batch
    int local_idx = threadIdx.x;

    // the first element to process by the current thread
    int thread_offset = first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx;
    grad += thread_offset;
    output += thread_offset;
    gradInput += thread_offset;

    // load data from global memory
    // Zero-initialized so masked/out-of-range elements drop out of the sums.
    acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f };
    acc_t output_reg[WARP_BATCH][WARP_ITERATIONS] { 0.0f };
    input_t temp_grad[ELEMENTS_PER_LDG_STG];
    input_t temp_output[ELEMENTS_PER_LDG_STG];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        int batch_element_count = (i >= local_batches) ? 0 : local_seq;

        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
            int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
            if (element_index < batch_element_count) {
                copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_grad, grad + i * element_count * stride + it * WARP_SIZE);
                copy_vector<input_t, ELEMENTS_PER_LDG_STG>(temp_output, output + i * element_count * stride + it * WARP_SIZE);

                #pragma unroll
                for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
                    if (element_index + element < batch_element_count) {
                        output_reg[i][it + element] = (acc_t)temp_output[element];
                    }
                }
                #pragma unroll
                for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
                    if (element_index + element < batch_element_count) {
                        // grad_reg holds y * dL/dy (the elementwise product).
                        grad_reg[i][it + element] = (acc_t)temp_grad[element] * output_reg[i][it + element];
                    }
                }
            }
        }
    }

    // Per-row sum of y * dL/dy, reduced across the warp.
    acc_t sum[WARP_BATCH];
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        sum[i] = grad_reg[i][0];
        #pragma unroll
        for (int it = 1; it < WARP_ITERATIONS; ++it) {
            sum[i] += grad_reg[i][it];
        }
    }
    warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

    // store result
    #pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
        if (i >= local_batches)
            break;
        #pragma unroll
        for (int it = 0; it < WARP_ITERATIONS; it+=ELEMENTS_PER_LDG_STG) {
            int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
            if (element_index < element_count) {
                // compute gradients
                output_t out[ELEMENTS_PER_LDG_STG];
                #pragma unroll
                for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
                    out[element] = (output_t)(scale * (grad_reg[i][it + element] - output_reg[i][it + element] * sum[i]));
                }
                copy_vector<output_t, ELEMENTS_PER_LDG_STG>(gradInput + i * element_count * stride + it * WARP_SIZE, out);
            }
        }
    }
}
|
| 331 |
+
|
| 332 |
+
} // end of anonymous namespace
|
| 333 |
+
|
| 334 |
+
/*
 * Host-side launcher for the causal (upper-triangular masked) softmax
 * forward kernel. Picks the kernel instantiation whose compile-time
 * log2_elements matches ceil(log2(softmax_elements)) and computes a launch
 * geometry consistent with the kernel's constexpr WARP_SIZE / WARP_BATCH.
 *
 * dst / src: [attn_batches, seq_len, seq_len] row-major buffers.
 * scale: multiplier applied to each input element before the softmax.
 *
 * Fix: `scale` used to be declared `const input_t`, which silently narrowed
 * the caller's float scale to half/bfloat16 before the kernel widened it
 * back to acc_t — losing precision. It now takes `const acc_t`, matching
 * both the kernel parameter and the backward dispatch; existing call sites
 * (explicit template args, float argument) are unaffected.
 */
template<typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_upper_triang_masked_softmax_forward(
    output_t *dst,
    const input_t *src,
    const acc_t scale,
    int softmax_elements,
    int softmax_elements_stride,
    int attn_batches)
{
    TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 8192 );
    if (softmax_elements == 0) {
        return;
    } else {
        int log2_elements = log2_ceil(softmax_elements);
        const int next_power_of_two = 1 << log2_elements;
        int seq_len = softmax_elements;
        int batch_count = attn_batches * seq_len;

        // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward.
        int warp_size = (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;

        // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward.
        int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

        // use 128 threads per block to maximimize gpu utilization
        constexpr int threads_per_block = 128;

        int warps_per_block = (threads_per_block / warp_size);
        int batches_per_block = warps_per_block * batches_per_warp;
        TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0);

        int blocks_per_seq = attn_batches / batches_per_block;
        dim3 blocks(seq_len, blocks_per_seq, 1);
        dim3 threads(warp_size, warps_per_block, 1);
        // One case per supported power-of-two row length; generated by a
        // local macro since the launch differs only in log2_elements.
        switch (log2_elements) {
#define LAUNCH_FWD_CASE(L2E)                                                               \
            case L2E:                                                                      \
                scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t, acc_t, L2E> \
                    <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(            \
                        dst, src, scale, batch_count, softmax_elements_stride, softmax_elements); \
                break;
            LAUNCH_FWD_CASE(0)   // 1
            LAUNCH_FWD_CASE(1)   // 2
            LAUNCH_FWD_CASE(2)   // 4
            LAUNCH_FWD_CASE(3)   // 8
            LAUNCH_FWD_CASE(4)   // 16
            LAUNCH_FWD_CASE(5)   // 32
            LAUNCH_FWD_CASE(6)   // 64
            LAUNCH_FWD_CASE(7)   // 128
            LAUNCH_FWD_CASE(8)   // 256
            LAUNCH_FWD_CASE(9)   // 512
            LAUNCH_FWD_CASE(10)  // 1024
            LAUNCH_FWD_CASE(11)  // 2048
            LAUNCH_FWD_CASE(12)  // 4096
            LAUNCH_FWD_CASE(13)  // 8192
#undef LAUNCH_FWD_CASE
            default:
                break;
        }
    }
}
|
| 431 |
+
|
| 432 |
+
/*
 * Host-side launcher for the causal softmax backward kernel. Selects the
 * instantiation matching ceil(log2(softmax_elements)) and derives a launch
 * geometry mirroring the kernel's constexpr WARP_SIZE / WARP_BATCH.
 */
template<typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_upper_triang_masked_softmax_backward(
    output_t *grad_input,
    input_t *grad,
    const input_t *output,
    const acc_t scale,
    int softmax_elements,
    int softmax_elements_stride,
    int attn_batches)
{
    TORCH_INTERNAL_ASSERT( softmax_elements >= 0 && softmax_elements <= 8192 );
    if (softmax_elements == 0) {
        return;
    }

    const int log2_elements = log2_ceil(softmax_elements);
    const int pow2_elements = 1 << log2_elements;
    const int seq_len = softmax_elements;
    const int batch_count = attn_batches * seq_len;

    // Must mirror the WARP_SIZE constexpr inside the backward kernel.
    const int warp_size = (pow2_elements < C10_WARP_SIZE) ? pow2_elements : C10_WARP_SIZE;
    // Must mirror the WARP_BATCH constexpr inside the backward kernel.
    const int batches_per_warp = (pow2_elements <= 128) ? 2 : 1;

    // 128 threads per block to maximize GPU utilization.
    constexpr int threads_per_block = 128;
    const int warps_per_block = threads_per_block / warp_size;
    const int batches_per_block = warps_per_block * batches_per_warp;
    TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0);

    const int blocks_per_seq = attn_batches / batches_per_block;
    dim3 blocks(seq_len, blocks_per_seq, 1);
    dim3 threads(warp_size, warps_per_block, 1);

    // One case per supported power-of-two row length; generated by a local
    // macro since only the template argument changes between cases.
    switch (log2_elements) {
#define LAUNCH_BWD_CASE(L2E)                                                                \
        case L2E:                                                                           \
            scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t, acc_t, L2E> \
                <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(                 \
                    grad_input, grad, output, scale, batch_count,                           \
                    softmax_elements_stride, softmax_elements);                             \
            break;
        LAUNCH_BWD_CASE(0)   // 1
        LAUNCH_BWD_CASE(1)   // 2
        LAUNCH_BWD_CASE(2)   // 4
        LAUNCH_BWD_CASE(3)   // 8
        LAUNCH_BWD_CASE(4)   // 16
        LAUNCH_BWD_CASE(5)   // 32
        LAUNCH_BWD_CASE(6)   // 64
        LAUNCH_BWD_CASE(7)   // 128
        LAUNCH_BWD_CASE(8)   // 256
        LAUNCH_BWD_CASE(9)   // 512
        LAUNCH_BWD_CASE(10)  // 1024
        LAUNCH_BWD_CASE(11)  // 2048
        LAUNCH_BWD_CASE(12)  // 4096
        LAUNCH_BWD_CASE(13)  // 8192
#undef LAUNCH_BWD_CASE
        default:
            break;
    }
}
|
Code/Baselines/flash-attention/csrc/fused_softmax/scaled_upper_triang_masked_softmax_cuda.cu
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* coding=utf-8
|
| 2 |
+
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*/
|
| 16 |
+
|
| 17 |
+
#include <ATen/ATen.h>
|
| 18 |
+
#include <cuda.h>
|
| 19 |
+
#include <cuda_runtime.h>
|
| 20 |
+
#include <cuda_fp16.h>
|
| 21 |
+
#include <cuda_profiler_api.h>
|
| 22 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 23 |
+
#include <torch/extension.h>
|
| 24 |
+
#include "scaled_upper_triang_masked_softmax.h"
|
| 25 |
+
#include "type_shim.h"
|
| 26 |
+
|
| 27 |
+
namespace multihead_attn {
|
| 28 |
+
namespace fused_softmax {
|
| 29 |
+
namespace scaled_upper_triang_masked_softmax {
|
| 30 |
+
|
| 31 |
+
// Forward pass of the scaled, upper-triangular (causal) masked softmax.
// Allocates the output tensor and dispatches per dtype (half / bfloat16)
// to dispatch_scaled_upper_triang_masked_softmax_forward; acc_t is float.
torch::Tensor fwd_cuda(
    torch::Tensor const& input,
    float scale_factor)
{
  // input is a 3d tensor with dimensions [attn_batches, seq_len, seq_len]
  const int attn_batches = input.size(0);
  const int seq_len = input.size(1);
  // The dispatcher only instantiates kernels up to 8192 elements per row.
  TORCH_INTERNAL_ASSERT(seq_len <= 8192);

  // Output
  // Same dtype/device as the input, detached from autograd.
  auto act_options = input.options().requires_grad(false);
  torch::Tensor softmax_results =
      torch::empty({attn_batches, seq_len, seq_len}, act_options);

  // Softmax Intermediate Result Ptr
  void* input_ptr = static_cast<void*>(input.data_ptr());
  void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());

  // NOTE(review): input is not made contiguous here (unlike bwd_cuda);
  // presumably callers guarantee a contiguous input — confirm.
  DISPATCH_HALF_AND_BFLOAT(
      input.scalar_type(),
      "dispatch_scaled_upper_triang_masked_softmax_forward",
      dispatch_scaled_upper_triang_masked_softmax_forward<scalar_t, scalar_t, float>(
          reinterpret_cast<scalar_t*>(softmax_results_ptr),
          reinterpret_cast<const scalar_t*>(input_ptr),
          scale_factor,
          seq_len,
          seq_len,
          attn_batches);
  );
  return softmax_results;
}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
torch::Tensor bwd_cuda(
|
| 65 |
+
torch::Tensor const& output_grads_,
|
| 66 |
+
torch::Tensor const& softmax_results_,
|
| 67 |
+
float scale_factor) {
|
| 68 |
+
|
| 69 |
+
auto output_grads = output_grads_.contiguous();
|
| 70 |
+
auto softmax_results = softmax_results_.contiguous();
|
| 71 |
+
|
| 72 |
+
//output grads is a 3d tensor with dimensions [attn_batches, seq_len, seq_len]
|
| 73 |
+
const int attn_batches = output_grads.size(0);
|
| 74 |
+
const int seq_len = output_grads.size(1);
|
| 75 |
+
TORCH_INTERNAL_ASSERT(output_grads.size(1) == output_grads.size(2));
|
| 76 |
+
|
| 77 |
+
void* output_grads_ptr = static_cast<void*>(output_grads.data_ptr());
|
| 78 |
+
|
| 79 |
+
//Softmax Grad
|
| 80 |
+
DISPATCH_HALF_AND_BFLOAT(
|
| 81 |
+
output_grads_.scalar_type(),
|
| 82 |
+
"dispatch_scaled_upper_triang_masked_softmax_backward",
|
| 83 |
+
dispatch_scaled_upper_triang_masked_softmax_backward<scalar_t, scalar_t, float>(
|
| 84 |
+
reinterpret_cast<scalar_t*>(output_grads_ptr),
|
| 85 |
+
reinterpret_cast<scalar_t*>(output_grads_ptr),
|
| 86 |
+
reinterpret_cast<scalar_t const*>(softmax_results.data_ptr()),
|
| 87 |
+
scale_factor,
|
| 88 |
+
seq_len,
|
| 89 |
+
seq_len,
|
| 90 |
+
attn_batches);
|
| 91 |
+
);
|
| 92 |
+
|
| 93 |
+
//backward pass is completely in-place
|
| 94 |
+
return output_grads;
|
| 95 |
+
}
|
| 96 |
+
}
|
| 97 |
+
}
|
| 98 |
+
}
|
Code/Baselines/flash-attention/csrc/fused_softmax/setup.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copied from https://github.com/NVIDIA/apex/tree/master/csrc/megatron
|
| 2 |
+
# We add the case where seqlen = 4k and seqlen = 8k
|
| 3 |
+
import os
|
| 4 |
+
import subprocess
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from setuptools import setup
|
| 8 |
+
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_cuda_bare_metal_version(cuda_dir):
|
| 12 |
+
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
|
| 13 |
+
output = raw_output.split()
|
| 14 |
+
release_idx = output.index("release") + 1
|
| 15 |
+
release = output[release_idx].split(".")
|
| 16 |
+
bare_metal_major = release[0]
|
| 17 |
+
bare_metal_minor = release[1][0]
|
| 18 |
+
|
| 19 |
+
return raw_output, bare_metal_major, bare_metal_minor
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def append_nvcc_threads(nvcc_extra_args):
|
| 23 |
+
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
|
| 24 |
+
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
|
| 25 |
+
nvcc_threads = os.getenv("NVCC_THREADS") or "4"
|
| 26 |
+
return nvcc_extra_args + ["--threads", nvcc_threads]
|
| 27 |
+
return nvcc_extra_args
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
cc_flag = []
|
| 31 |
+
cc_flag.append("-gencode")
|
| 32 |
+
cc_flag.append("arch=compute_70,code=sm_70")
|
| 33 |
+
cc_flag.append("-gencode")
|
| 34 |
+
cc_flag.append("arch=compute_80,code=sm_80")
|
| 35 |
+
|
| 36 |
+
setup(
|
| 37 |
+
name='fused_softmax_lib',
|
| 38 |
+
ext_modules=[
|
| 39 |
+
CUDAExtension(
|
| 40 |
+
name='fused_softmax_lib',
|
| 41 |
+
sources=['fused_softmax.cpp', 'scaled_masked_softmax_cuda.cu', 'scaled_upper_triang_masked_softmax_cuda.cu'],
|
| 42 |
+
extra_compile_args={
|
| 43 |
+
'cxx': ['-O3',],
|
| 44 |
+
'nvcc': append_nvcc_threads(['-O3', '--use_fast_math'] + cc_flag)
|
| 45 |
+
}
|
| 46 |
+
)
|
| 47 |
+
],
|
| 48 |
+
cmdclass={
|
| 49 |
+
'build_ext': BuildExtension
|
| 50 |
+
})
|
Code/Baselines/flash-attention/csrc/fused_softmax/type_shim.h
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <ATen/ATen.h>
|
| 2 |
+
|
| 3 |
+
#define DISPATCH_HALF_AND_BFLOAT(TYPE, NAME, ...) \
|
| 4 |
+
switch(TYPE) \
|
| 5 |
+
{ \
|
| 6 |
+
case at::ScalarType::Half: \
|
| 7 |
+
{ \
|
| 8 |
+
using scalar_t = at::Half; \
|
| 9 |
+
__VA_ARGS__; \
|
| 10 |
+
break; \
|
| 11 |
+
} \
|
| 12 |
+
case at::ScalarType::BFloat16: \
|
| 13 |
+
{ \
|
| 14 |
+
using scalar_t = at::BFloat16; \
|
| 15 |
+
__VA_ARGS__; \
|
| 16 |
+
break; \
|
| 17 |
+
} \
|
| 18 |
+
default: \
|
| 19 |
+
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
|
| 20 |
+
}
|
Code/Baselines/flash-attention/csrc/layer_norm/README.md
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
This CUDA extension implements fused dropout + residual + LayerNorm, building on
|
| 2 |
+
Apex's [FastLayerNorm](https://github.com/NVIDIA/apex/tree/master/apex/contrib/layer_norm).
|
| 3 |
+
Major changes:
|
| 4 |
+
- Add dropout and residual.
|
| 5 |
+
- Make it work for both pre-norm and post-norm architecture.
|
| 6 |
+
- Support more hidden dimensions (all dimensions divisible by 8, up to 8192).
|
| 7 |
+
- Implement RMSNorm as an option.
|
| 8 |
+
- Support layer norm with parallel residual (e.g., GPT-J, GPT-NeoX, PaLM).
|
| 9 |
+
|
| 10 |
+
If you want to use it for dimensions larger than 8k, please file an issue.
|
| 11 |
+
|
| 12 |
+
This extension has only been tested on A100s.
|
| 13 |
+
|
| 14 |
+
```sh
|
| 15 |
+
cd csrc/layer_norm && pip install .
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
As of 2024-01-05, this extension is no longer used in the FlashAttention repo.
|
| 19 |
+
We've instead switched to a Triton-based
|
| 20 |
+
[implementation](https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/ops/triton/layer_norm.py).
|
Code/Baselines/flash-attention/csrc/layer_norm/ln.h
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <unordered_map>
|
| 4 |
+
#include <cuda_fp16.h>
|
| 5 |
+
#include <cuda_bf16.h>
|
| 6 |
+
|
| 7 |
+
#ifdef OLD_GENERATOR_PATH
|
| 8 |
+
#include <ATen/CUDAGeneratorImpl.h>
|
| 9 |
+
#else
|
| 10 |
+
#include <ATen/cuda/CUDAGeneratorImpl.h>
|
| 11 |
+
#endif
|
| 12 |
+
|
| 13 |
+
namespace layer_norm {
|
| 14 |
+
|
| 15 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 16 |
+
|
| 17 |
+
template<typename Params>
|
| 18 |
+
struct LaunchParams{
|
| 19 |
+
|
| 20 |
+
size_t elts_per_thread;
|
| 21 |
+
size_t workspace_bytes;
|
| 22 |
+
size_t barrier_size;
|
| 23 |
+
|
| 24 |
+
cudaDeviceProp * props;
|
| 25 |
+
|
| 26 |
+
cudaStream_t stream;
|
| 27 |
+
|
| 28 |
+
Params params;
|
| 29 |
+
|
| 30 |
+
};
|
| 31 |
+
|
| 32 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 33 |
+
|
| 34 |
+
struct ParamsBase {
|
| 35 |
+
ParamsBase()
|
| 36 |
+
: ctas_per_col(0)
|
| 37 |
+
, rows(0)
|
| 38 |
+
, cols(0)
|
| 39 |
+
, x(nullptr)
|
| 40 |
+
, mu(nullptr)
|
| 41 |
+
, rs(nullptr)
|
| 42 |
+
, gamma(nullptr)
|
| 43 |
+
, gamma1(nullptr)
|
| 44 |
+
, rowscale(nullptr)
|
| 45 |
+
, colscale(nullptr)
|
| 46 |
+
, dropout_keep_p(1.f)
|
| 47 |
+
, dropout_scale(1.f)
|
| 48 |
+
, is_rms_norm(false)
|
| 49 |
+
, workspace(nullptr)
|
| 50 |
+
, barrier(nullptr)
|
| 51 |
+
{
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
// For Multi-CTA, number of different CTA groups. Otherwise same as gridDim.x.
|
| 55 |
+
int ctas_per_col;
|
| 56 |
+
|
| 57 |
+
// Input is interpreted as matrix. We normalize across columns.
|
| 58 |
+
int rows;
|
| 59 |
+
int cols;
|
| 60 |
+
|
| 61 |
+
// Common data pointers.
|
| 62 |
+
void *x0;
|
| 63 |
+
void *x1;
|
| 64 |
+
void *residual;
|
| 65 |
+
void *x;
|
| 66 |
+
void *dmask;
|
| 67 |
+
void *dmask1;
|
| 68 |
+
void *mu;
|
| 69 |
+
void *rs;
|
| 70 |
+
void *gamma;
|
| 71 |
+
void *gamma1;
|
| 72 |
+
void *rowscale;
|
| 73 |
+
void *colscale;
|
| 74 |
+
void *x0_subset;
|
| 75 |
+
void *z_subset;
|
| 76 |
+
|
| 77 |
+
float inverse_cols;
|
| 78 |
+
|
| 79 |
+
float dropout_keep_p;
|
| 80 |
+
float dropout_scale;
|
| 81 |
+
float rowscale_const;
|
| 82 |
+
|
| 83 |
+
bool is_rms_norm;
|
| 84 |
+
|
| 85 |
+
// Multi-CTA workspace in gmem.
|
| 86 |
+
void *workspace;
|
| 87 |
+
|
| 88 |
+
// Multi-CTA sync barriers in gmem.
|
| 89 |
+
int *barrier;
|
| 90 |
+
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 94 |
+
|
| 95 |
+
struct FwdParams : public ParamsBase {
|
| 96 |
+
FwdParams()
|
| 97 |
+
: ParamsBase()
|
| 98 |
+
, z(nullptr)
|
| 99 |
+
, z1(nullptr)
|
| 100 |
+
, beta(nullptr)
|
| 101 |
+
, beta1(nullptr)
|
| 102 |
+
, epsilon(0.f)
|
| 103 |
+
{
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
// Output of LN FWD.
|
| 107 |
+
void *z;
|
| 108 |
+
void *z1;
|
| 109 |
+
void *beta;
|
| 110 |
+
void *beta1;
|
| 111 |
+
float epsilon;
|
| 112 |
+
|
| 113 |
+
// Random state.
|
| 114 |
+
at::PhiloxCudaState philox_args;
|
| 115 |
+
};
|
| 116 |
+
|
| 117 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 118 |
+
|
| 119 |
+
struct BwdParams : public ParamsBase {
|
| 120 |
+
BwdParams()
|
| 121 |
+
: ParamsBase()
|
| 122 |
+
, dz(nullptr)
|
| 123 |
+
, dz1(nullptr)
|
| 124 |
+
, dx(nullptr)
|
| 125 |
+
, dbeta_part(nullptr)
|
| 126 |
+
, dgamma_part(nullptr)
|
| 127 |
+
, dbeta1_part(nullptr)
|
| 128 |
+
, dgamma1_part(nullptr)
|
| 129 |
+
, dcolscale_part(nullptr)
|
| 130 |
+
, dx0(nullptr)
|
| 131 |
+
, dx1(nullptr)
|
| 132 |
+
, dresidual(nullptr)
|
| 133 |
+
, dbeta(nullptr)
|
| 134 |
+
, dgamma(nullptr)
|
| 135 |
+
, dbeta1(nullptr)
|
| 136 |
+
, dgamma1(nullptr)
|
| 137 |
+
, dcolscale(nullptr)
|
| 138 |
+
{
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
// Input: gradient wrt. LN FWD output.
|
| 142 |
+
void *dz;
|
| 143 |
+
void *dz1;
|
| 144 |
+
// Input: gradient wrt residual.
|
| 145 |
+
void *dx;
|
| 146 |
+
|
| 147 |
+
// Workspace for Wgrad pre-reduction.
|
| 148 |
+
void *dbeta_part;
|
| 149 |
+
void *dgamma_part;
|
| 150 |
+
void *dbeta1_part;
|
| 151 |
+
void *dgamma1_part;
|
| 152 |
+
void *dcolscale_part;
|
| 153 |
+
|
| 154 |
+
// Output: Dgrad.
|
| 155 |
+
void *dx0;
|
| 156 |
+
void *dx1;
|
| 157 |
+
void *dresidual;
|
| 158 |
+
// Output: Wgrad.
|
| 159 |
+
void *dbeta;
|
| 160 |
+
void *dgamma;
|
| 161 |
+
void *dbeta1;
|
| 162 |
+
void *dgamma1;
|
| 163 |
+
void *dcolscale;
|
| 164 |
+
|
| 165 |
+
};
|
| 166 |
+
|
| 167 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 168 |
+
|
| 169 |
+
using FwdFunction = std::function<void(LaunchParams<FwdParams>&, const bool)>;
|
| 170 |
+
using BwdFunction = std::function<void(LaunchParams<BwdParams>&, const bool)>;
|
| 171 |
+
using FunctionKey = uint64_t;
|
| 172 |
+
using FwdRegistry = std::unordered_map<FunctionKey, FwdFunction>;
|
| 173 |
+
using BwdRegistry = std::unordered_map<FunctionKey, BwdFunction>;
|
| 174 |
+
|
| 175 |
+
extern FwdRegistry FWD_FUNCS, PARALLEL_FWD_FUNCS;
|
| 176 |
+
extern BwdRegistry BWD_FUNCS, PARALLEL_BWD_FUNCS;
|
| 177 |
+
|
| 178 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 179 |
+
|
| 180 |
+
using fp32 = float;
|
| 181 |
+
using fp16 = half;
|
| 182 |
+
using bf16 = nv_bfloat16;
|
| 183 |
+
|
| 184 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 185 |
+
|
| 186 |
+
template<typename T>
|
| 187 |
+
struct TypeId{};
|
| 188 |
+
|
| 189 |
+
template<>
|
| 190 |
+
struct TypeId<fp16>{
|
| 191 |
+
constexpr static uint32_t Value = 0;
|
| 192 |
+
};
|
| 193 |
+
|
| 194 |
+
template<>
|
| 195 |
+
struct TypeId<bf16>{
|
| 196 |
+
constexpr static uint32_t Value = 1;
|
| 197 |
+
};
|
| 198 |
+
|
| 199 |
+
template<>
|
| 200 |
+
struct TypeId<fp32>{
|
| 201 |
+
constexpr static uint32_t Value = 2;
|
| 202 |
+
};
|
| 203 |
+
|
| 204 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 205 |
+
|
| 206 |
+
template<typename T, int S>
|
| 207 |
+
struct Type2Key{
|
| 208 |
+
constexpr static uint32_t Value = TypeId<T>::Value << S;
|
| 209 |
+
};
|
| 210 |
+
|
| 211 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 212 |
+
|
| 213 |
+
template<typename T>
|
| 214 |
+
struct WeightType2Key : public Type2Key<T, 0>{};
|
| 215 |
+
|
| 216 |
+
template<typename T>
|
| 217 |
+
struct InputType2Key : public Type2Key<T, 2>{};
|
| 218 |
+
|
| 219 |
+
template<typename T>
|
| 220 |
+
struct ResidualType2Key : public Type2Key<T, 4>{};
|
| 221 |
+
|
| 222 |
+
template<typename T>
|
| 223 |
+
struct OutputType2Key : public Type2Key<T, 6>{};
|
| 224 |
+
|
| 225 |
+
template<typename T>
|
| 226 |
+
struct ComputeType2Key : public Type2Key<T, 8>{};
|
| 227 |
+
|
| 228 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 229 |
+
|
| 230 |
+
template<typename W, typename I, typename R, typename O, typename C>
|
| 231 |
+
struct Types2Key{
|
| 232 |
+
constexpr static uint32_t Value = WeightType2Key<W>::Value | InputType2Key<I>::Value | ResidualType2Key<R>::Value | OutputType2Key<O>::Value | ComputeType2Key<C>::Value;
|
| 233 |
+
constexpr static inline uint64_t get(const uint64_t hidden_size){
|
| 234 |
+
constexpr uint64_t type_key = Value;
|
| 235 |
+
return (type_key << 32) | hidden_size;
|
| 236 |
+
}
|
| 237 |
+
};
|
| 238 |
+
|
| 239 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 240 |
+
|
| 241 |
+
template<typename W, typename I, typename R, typename O, typename C, uint64_t HIDDEN_SIZE>
|
| 242 |
+
struct FwdRegistrar{
|
| 243 |
+
FwdRegistrar(FwdFunction f){
|
| 244 |
+
uint64_t key = Types2Key<W,I,R,O,C>::get(HIDDEN_SIZE);
|
| 245 |
+
FWD_FUNCS.insert({ key, f });
|
| 246 |
+
}
|
| 247 |
+
};
|
| 248 |
+
|
| 249 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 250 |
+
|
| 251 |
+
template<typename W, typename I, typename R, typename O, typename C, uint64_t HIDDEN_SIZE>
|
| 252 |
+
struct BwdRegistrar{
|
| 253 |
+
BwdRegistrar(BwdFunction f){
|
| 254 |
+
uint64_t key = Types2Key<W,I,R,O,C>::get(HIDDEN_SIZE);
|
| 255 |
+
BWD_FUNCS.insert({ key, f });
|
| 256 |
+
}
|
| 257 |
+
};
|
| 258 |
+
|
| 259 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 260 |
+
|
| 261 |
+
template<typename W, typename I, typename R, typename O, typename C, uint64_t HIDDEN_SIZE>
|
| 262 |
+
struct FwdParallelRegistrar{
|
| 263 |
+
FwdParallelRegistrar(FwdFunction f){
|
| 264 |
+
uint64_t key = Types2Key<W,I,R,O,C>::get(HIDDEN_SIZE);
|
| 265 |
+
PARALLEL_FWD_FUNCS.insert({ key, f });
|
| 266 |
+
}
|
| 267 |
+
};
|
| 268 |
+
|
| 269 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 270 |
+
|
| 271 |
+
template<typename W, typename I, typename R, typename O, typename C, uint64_t HIDDEN_SIZE>
|
| 272 |
+
struct BwdParallelRegistrar{
|
| 273 |
+
BwdParallelRegistrar(BwdFunction f){
|
| 274 |
+
uint64_t key = Types2Key<W,I,R,O,C>::get(HIDDEN_SIZE);
|
| 275 |
+
PARALLEL_BWD_FUNCS.insert({ key, f });
|
| 276 |
+
}
|
| 277 |
+
};
|
| 278 |
+
|
| 279 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 280 |
+
|
| 281 |
+
} // namespace layer_norm
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_api.cpp
ADDED
|
@@ -0,0 +1,846 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include <torch/extension.h>
|
| 2 |
+
#include "ATen/cuda/CUDAContext.h"
|
| 3 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 4 |
+
|
| 5 |
+
#include "ln.h"
|
| 6 |
+
|
| 7 |
+
/*
|
| 8 |
+
|
| 9 |
+
Supported Type combinations:
|
| 10 |
+
|
| 11 |
+
input residual compute weights output
|
| 12 |
+
============================================
|
| 13 |
+
fp32 fp32 fp32 fp32 fp32
|
| 14 |
+
fp16 fp32 fp32 fp32 fp16
|
| 15 |
+
fp16 fp16 fp32 fp32 fp16
|
| 16 |
+
bf16 fp32 fp32 fp32 bf16
|
| 17 |
+
bf16 bf16 fp32 fp32 bf16
|
| 18 |
+
fp16 fp16 fp32 fp16 fp16
|
| 19 |
+
bf16 bf16 fp32 bf16 bf16
|
| 20 |
+
|
| 21 |
+
Remarks:
|
| 22 |
+
Output type = Input type
|
| 23 |
+
Compute always in FP32
|
| 24 |
+
|
| 25 |
+
*/
|
| 26 |
+
|
| 27 |
+
namespace layer_norm {
|
| 28 |
+
|
| 29 |
+
// Create registries and provide runtime versions of config hash functions.
|
| 30 |
+
|
| 31 |
+
FwdRegistry FWD_FUNCS, PARALLEL_FWD_FUNCS;
|
| 32 |
+
BwdRegistry BWD_FUNCS, PARALLEL_BWD_FUNCS;
|
| 33 |
+
|
| 34 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 35 |
+
|
| 36 |
+
uint32_t get_type_id(torch::Dtype dtype){
|
| 37 |
+
if( dtype == torch::kFloat16 ) {
|
| 38 |
+
return TypeId<fp16>::Value;
|
| 39 |
+
} else if( dtype == torch::kBFloat16 ) {
|
| 40 |
+
return TypeId<bf16>::Value;
|
| 41 |
+
} else if( dtype == torch::kFloat32 ) {
|
| 42 |
+
return TypeId<fp32>::Value;
|
| 43 |
+
} else {
|
| 44 |
+
TORCH_CHECK(false, "Type not supported: ", dtype);
|
| 45 |
+
}
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 49 |
+
|
| 50 |
+
uint64_t get_key(torch::Dtype wtype, torch::Dtype itype, torch::Dtype rtype, torch::Dtype otype, torch::Dtype ctype, uint64_t hidden_size) {
|
| 51 |
+
using namespace layer_norm;
|
| 52 |
+
uint64_t type_key = get_type_id(wtype) | (get_type_id(itype) << 2) | (get_type_id(rtype) << 4) | (get_type_id(otype) << 6) | (get_type_id(ctype) << 8);
|
| 53 |
+
uint64_t launcher_key = (type_key << 32) | hidden_size;
|
| 54 |
+
return launcher_key;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
} // namespace layer_norm
|
| 58 |
+
|
| 59 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 60 |
+
|
| 61 |
+
layer_norm::FwdFunction & get_fwd_launcher(torch::Dtype wtype, torch::Dtype itype, torch::Dtype rtype, torch::Dtype otype, torch::Dtype ctype, uint32_t hidden_size) {
|
| 62 |
+
auto iter = layer_norm::FWD_FUNCS.find(layer_norm::get_key(wtype, itype, rtype, otype, ctype, hidden_size));
|
| 63 |
+
if( iter != layer_norm::FWD_FUNCS.end() ) {
|
| 64 |
+
return iter->second;
|
| 65 |
+
} else {
|
| 66 |
+
TORCH_CHECK(false, "FWD: Unsupported hidden_size or types: ", hidden_size, wtype, itype, rtype, otype, ctype);
|
| 67 |
+
}
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 71 |
+
|
| 72 |
+
layer_norm::BwdFunction & get_bwd_launcher(torch::Dtype wtype, torch::Dtype itype, torch::Dtype rtype, torch::Dtype otype, torch::Dtype ctype, uint32_t hidden_size) {
|
| 73 |
+
auto iter = layer_norm::BWD_FUNCS.find(layer_norm::get_key(wtype, itype, rtype, otype, ctype, hidden_size));
|
| 74 |
+
if( iter != layer_norm::BWD_FUNCS.end() ) {
|
| 75 |
+
return iter->second;
|
| 76 |
+
} else {
|
| 77 |
+
TORCH_CHECK(false, "BWD: Unsupported hidden_size or types: ", hidden_size, wtype, itype, rtype, otype, ctype);
|
| 78 |
+
}
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 82 |
+
|
| 83 |
+
layer_norm::FwdFunction & get_parallel_fwd_launcher(torch::Dtype wtype, torch::Dtype itype, torch::Dtype rtype, torch::Dtype otype, torch::Dtype ctype, uint32_t hidden_size) {
|
| 84 |
+
auto iter = layer_norm::PARALLEL_FWD_FUNCS.find(layer_norm::get_key(wtype, itype, rtype, otype, ctype, hidden_size));
|
| 85 |
+
if( iter != layer_norm::PARALLEL_FWD_FUNCS.end() ) {
|
| 86 |
+
return iter->second;
|
| 87 |
+
} else {
|
| 88 |
+
TORCH_CHECK(false, "FWD: Unsupported hidden_size or types: ", hidden_size, wtype, itype, rtype, otype, ctype);
|
| 89 |
+
}
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 93 |
+
|
| 94 |
+
layer_norm::BwdFunction & get_parallel_bwd_launcher(torch::Dtype wtype, torch::Dtype itype, torch::Dtype rtype, torch::Dtype otype, torch::Dtype ctype, uint32_t hidden_size) {
|
| 95 |
+
auto iter = layer_norm::PARALLEL_BWD_FUNCS.find(layer_norm::get_key(wtype, itype, rtype, otype, ctype, hidden_size));
|
| 96 |
+
if( iter != layer_norm::PARALLEL_BWD_FUNCS.end() ) {
|
| 97 |
+
return iter->second;
|
| 98 |
+
} else {
|
| 99 |
+
TORCH_CHECK(false, "BWD: Unsupported hidden_size or types: ", hidden_size, wtype, itype, rtype, otype, ctype);
|
| 100 |
+
}
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 104 |
+
|
| 105 |
+
std::vector<at::Tensor> dropout_add_ln_fwd(const at::Tensor &x0, // Input: BxSxhidden_size
|
| 106 |
+
std::optional<const at::Tensor> &residual_, // Residual: BxSxhidden_size
|
| 107 |
+
const at::Tensor &gamma, // hidden_size
|
| 108 |
+
std::optional<const at::Tensor> &beta_, // hidden_size
|
| 109 |
+
std::optional<const at::Tensor> &rowscale_, // BxS
|
| 110 |
+
std::optional<const at::Tensor> &colscale_, // hidden_size
|
| 111 |
+
std::optional<const at::Tensor> &x0_subset_, // BxS
|
| 112 |
+
std::optional<const at::Tensor> &z_subset_, // BxS
|
| 113 |
+
const float dropout_p,
|
| 114 |
+
const float epsilon,
|
| 115 |
+
const float rowscale_const,
|
| 116 |
+
const int64_t z_numrows,
|
| 117 |
+
std::optional<at::Generator> gen_,
|
| 118 |
+
bool residual_in_fp32=false,
|
| 119 |
+
bool is_rms_norm=false
|
| 120 |
+
) {
|
| 121 |
+
auto itype = x0.scalar_type();
|
| 122 |
+
auto rtype = residual_.has_value()
|
| 123 |
+
? residual_.value().scalar_type()
|
| 124 |
+
: (residual_in_fp32 ? torch::kFloat32 : x0.scalar_type());
|
| 125 |
+
auto wtype = gamma.scalar_type();
|
| 126 |
+
auto otype = itype;
|
| 127 |
+
auto ctype = torch::kFloat32;
|
| 128 |
+
auto mtype = torch::kUInt8;
|
| 129 |
+
|
| 130 |
+
TORCH_CHECK(x0.is_cuda());
|
| 131 |
+
TORCH_CHECK(gamma.is_cuda());
|
| 132 |
+
|
| 133 |
+
TORCH_CHECK(x0.is_contiguous());
|
| 134 |
+
// c10::IntArrayRef does not own the storage, so we need to construct a vector.
|
| 135 |
+
// Otherwise just constructing IntArrayRef({blah}) will cause uninitialized memory because
|
| 136 |
+
// blah is then deallocated.
|
| 137 |
+
std::vector<int64_t> sizes_vec {!x0_subset_.has_value() ? x0.size(0) : x0_subset_.value().size(0), x0.size(1)};
|
| 138 |
+
auto sizes = c10::IntArrayRef(sizes_vec);
|
| 139 |
+
TORCH_CHECK(x0.dim() == 2);
|
| 140 |
+
TORCH_CHECK(sizes.size() == 2);
|
| 141 |
+
|
| 142 |
+
const int rows = sizes[0];
|
| 143 |
+
const int cols = sizes[1];
|
| 144 |
+
auto hidden_size = gamma.numel();
|
| 145 |
+
TORCH_CHECK(hidden_size == cols);
|
| 146 |
+
|
| 147 |
+
if (beta_.has_value()) {
|
| 148 |
+
auto beta = beta_.value();
|
| 149 |
+
TORCH_CHECK(beta.dtype() == wtype);
|
| 150 |
+
TORCH_CHECK(beta.is_cuda());
|
| 151 |
+
TORCH_CHECK(beta.is_contiguous());
|
| 152 |
+
TORCH_CHECK(beta.sizes() == gamma.sizes());
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
if (residual_.has_value()) {
|
| 156 |
+
auto residual = residual_.value();
|
| 157 |
+
TORCH_CHECK(residual.is_cuda());
|
| 158 |
+
TORCH_CHECK(residual.is_contiguous());
|
| 159 |
+
TORCH_CHECK(residual.sizes() == sizes);
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
if (rowscale_.has_value()) {
|
| 163 |
+
auto rowscale = rowscale_.value();
|
| 164 |
+
TORCH_CHECK(rowscale.is_cuda());
|
| 165 |
+
TORCH_CHECK(rowscale.is_contiguous());
|
| 166 |
+
TORCH_CHECK(rowscale.sizes() == c10::IntArrayRef{rows});
|
| 167 |
+
TORCH_CHECK(rowscale.dtype() == itype);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
if (colscale_.has_value()) {
|
| 171 |
+
auto colscale = colscale_.value();
|
| 172 |
+
TORCH_CHECK(colscale.is_cuda());
|
| 173 |
+
TORCH_CHECK(colscale.is_contiguous());
|
| 174 |
+
TORCH_CHECK(colscale.sizes() == c10::IntArrayRef{cols});
|
| 175 |
+
TORCH_CHECK(colscale.dtype() == wtype);
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
if (x0_subset_.has_value()) {
|
| 179 |
+
auto x0_subset = x0_subset_.value();
|
| 180 |
+
TORCH_CHECK(x0_subset.is_cuda());
|
| 181 |
+
TORCH_CHECK(x0_subset.is_contiguous());
|
| 182 |
+
TORCH_CHECK(x0_subset.sizes() == c10::IntArrayRef{rows});
|
| 183 |
+
TORCH_CHECK(x0_subset.dtype() == torch::kInt32);
|
| 184 |
+
|
| 185 |
+
TORCH_CHECK(z_subset_.has_value());
|
| 186 |
+
auto z_subset = z_subset_.value();
|
| 187 |
+
TORCH_CHECK(z_subset.is_cuda());
|
| 188 |
+
TORCH_CHECK(z_subset.is_contiguous());
|
| 189 |
+
TORCH_CHECK(z_subset.sizes() == c10::IntArrayRef{rows});
|
| 190 |
+
TORCH_CHECK(z_subset.dtype() == torch::kInt32);
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
TORCH_CHECK((hidden_size % 8 == 0) && (hidden_size <= 8192));
|
| 194 |
+
TORCH_CHECK(epsilon >= 0.f);
|
| 195 |
+
|
| 196 |
+
// Otherwise the kernel will be launched from cuda:0 device
|
| 197 |
+
at::cuda::CUDAGuard device_guard{x0.device()};
|
| 198 |
+
|
| 199 |
+
auto opts = x0.options();
|
| 200 |
+
|
| 201 |
+
bool save_x = residual_.has_value() || (dropout_p > 0.f) || rowscale_.has_value() || colscale_.has_value() || x0_subset_.has_value() || (itype != rtype);
|
| 202 |
+
at::Tensor x;
|
| 203 |
+
if (save_x) { x = torch::empty(sizes, opts.dtype(rtype)); }
|
| 204 |
+
at::Tensor dmask;
|
| 205 |
+
if (dropout_p > 0.f) { dmask = torch::empty(x0.sizes(), opts.dtype(mtype)); };
|
| 206 |
+
auto z = torch::empty(z_subset_.has_value() ? c10::IntArrayRef{z_numrows, cols} : sizes, opts.dtype(otype));
|
| 207 |
+
|
| 208 |
+
auto mu = torch::empty({ rows }, opts.dtype(ctype));
|
| 209 |
+
auto rsigma = torch::empty({ rows }, opts.dtype(ctype));
|
| 210 |
+
|
| 211 |
+
layer_norm::LaunchParams<layer_norm::FwdParams> launch_params;
|
| 212 |
+
|
| 213 |
+
launch_params.props = at::cuda::getCurrentDeviceProperties();
|
| 214 |
+
launch_params.stream = at::cuda::getCurrentCUDAStream().stream();
|
| 215 |
+
TORCH_CHECK(dropout_p < 1.f);
|
| 216 |
+
launch_params.params.dropout_keep_p = 1.f - dropout_p;
|
| 217 |
+
launch_params.params.residual = residual_.has_value() ? residual_.value().data_ptr() : nullptr;
|
| 218 |
+
launch_params.params.rowscale = rowscale_.has_value() ? rowscale_.value().data_ptr() : nullptr;
|
| 219 |
+
launch_params.params.colscale = colscale_.has_value() ? colscale_.value().data_ptr() : nullptr;
|
| 220 |
+
launch_params.params.x0_subset = x0_subset_.has_value() ? x0_subset_.value().data_ptr() : nullptr;
|
| 221 |
+
launch_params.params.z_subset = z_subset_.has_value() ? z_subset_.value().data_ptr() : nullptr;
|
| 222 |
+
|
| 223 |
+
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
|
| 224 |
+
gen_, at::cuda::detail::getDefaultCUDAGenerator());
|
| 225 |
+
|
| 226 |
+
auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
|
| 227 |
+
const int multiple = hidden_size <= 1536 ? 256 : (hidden_size <= 3072 ? 512 : 1024);
|
| 228 |
+
// Request the kernel launcher.
|
| 229 |
+
auto launcher = get_fwd_launcher(wtype, itype, rtype, otype, ctype, round_multiple(hidden_size, multiple));
|
| 230 |
+
|
| 231 |
+
// Set the kernel runtime parameters.
|
| 232 |
+
layer_norm::FwdParams ¶ms = launch_params.params;
|
| 233 |
+
params.rows = rows;
|
| 234 |
+
params.cols = cols;
|
| 235 |
+
params.x0 = x0.data_ptr();
|
| 236 |
+
params.x = save_x ? x.data_ptr() : nullptr;
|
| 237 |
+
params.dmask = dropout_p > 0.f ? dmask.data_ptr() : nullptr;
|
| 238 |
+
params.mu = mu.data_ptr();
|
| 239 |
+
params.rs = rsigma.data_ptr();
|
| 240 |
+
params.gamma = gamma.data_ptr();
|
| 241 |
+
params.beta = beta_.has_value() ? beta_.value().data_ptr() : nullptr;
|
| 242 |
+
params.z = z.data_ptr();
|
| 243 |
+
params.epsilon = epsilon;
|
| 244 |
+
params.dropout_scale = 1.f / (1.f - dropout_p);
|
| 245 |
+
params.inverse_cols = 1.f / float(params.cols);
|
| 246 |
+
params.rowscale_const = rowscale_const;
|
| 247 |
+
params.is_rms_norm = is_rms_norm;
|
| 248 |
+
|
| 249 |
+
// Query the kernel-specific launch parameters.
|
| 250 |
+
launcher(launch_params, true);
|
| 251 |
+
|
| 252 |
+
at::Tensor workspace, barrier;
|
| 253 |
+
|
| 254 |
+
if (dropout_p > 0.f) {
|
| 255 |
+
// number of times random will be generated per thread, to offset philox counter in thc random
|
| 256 |
+
// state
|
| 257 |
+
int64_t counter_offset = launch_params.elts_per_thread;
|
| 258 |
+
|
| 259 |
+
// See Note [Acquire lock when using random generators]
|
| 260 |
+
{
|
| 261 |
+
std::lock_guard<std::mutex> lock(gen->mutex_);
|
| 262 |
+
params.philox_args = gen->philox_cuda_state(counter_offset);
|
| 263 |
+
}
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
if( launch_params.barrier_size > 0 ) {
|
| 267 |
+
auto options = x0.options();
|
| 268 |
+
barrier = torch::zeros(launch_params.barrier_size, options.dtype(torch::kInt32));
|
| 269 |
+
workspace = torch::empty(launch_params.workspace_bytes, options.dtype(torch::kChar));
|
| 270 |
+
params.workspace = workspace.data_ptr();
|
| 271 |
+
params.barrier = barrier.data_ptr<int>();
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
// Launch the kernel.
|
| 275 |
+
launcher(launch_params, false);
|
| 276 |
+
|
| 277 |
+
return { z, x, dmask, mu, rsigma };
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 281 |
+
|
| 282 |
+
// Backward pass of fused Dropout + residual Add + LayerNorm (or RMSNorm).
// Consumes dz = dL/dz and the saved forward statistics (mu, rsigma) and returns
// { dx0, dresidual, dgamma, dbeta, dgamma_part, dbeta_part } (+ dcolscale, dcolscale_part
// when colscale is present). The *_part tensors are per-CTA partial reductions of the
// weight gradients that the kernel finalizes into dgamma/dbeta.
std::vector<at::Tensor> dropout_add_ln_bwd(const at::Tensor &dz,     // BxSxhidden_size
                                           std::optional<const at::Tensor> &dx_,      // BxSxhidden_size
                                           const at::Tensor &x,      // BxSxhidden_size
                                           std::optional<const at::Tensor> &x0_,      // BxSxhidden_size
                                           std::optional<const at::Tensor> &dmask_,   // BxSxhidden_size
                                           const at::Tensor &mu,     // BxS, FP32!
                                           const at::Tensor &rsigma, // BxS, FP32!
                                           const at::Tensor &gamma,   // hidden_size
                                           std::optional<const at::Tensor> &rowscale_,      // BxS
                                           std::optional<const at::Tensor> &colscale_,      // hidden_size
                                           std::optional<const at::Tensor> &x0_subset_,     // BxS
                                           std::optional<const at::Tensor> &z_subset_,      // BxS
                                           const float dropout_p,
                                           const float rowscale_const,
                                           const int64_t x0_numrows,
                                           const bool has_residual,
                                           bool is_rms_norm=false
) {

    // Dtype roles: itype = input/output grads, rtype = residual stream, wtype = weights,
    // otype = output grad (same as itype), ctype = compute/statistics (fp32),
    // mtype = dropout mask (uint8).
    auto itype = dz.scalar_type();
    auto rtype = x.scalar_type();
    auto wtype = gamma.scalar_type();
    auto otype = itype;
    auto ctype = torch::kFloat32;
    auto mtype = torch::kUInt8;

    // The dropout mask saved by the forward is required to invert dropout.
    if (dropout_p > 0.f) { TORCH_CHECK(dmask_.has_value()); }

    TORCH_CHECK(dz.dtype() == otype);
    TORCH_CHECK(mu.dtype() == ctype);
    TORCH_CHECK(rsigma.dtype() == ctype);

    TORCH_CHECK(x.is_cuda());
    TORCH_CHECK(dz.is_cuda());
    TORCH_CHECK(mu.is_cuda());
    TORCH_CHECK(rsigma.is_cuda());
    TORCH_CHECK(gamma.is_cuda());

    TORCH_CHECK(x.is_contiguous());
    TORCH_CHECK(dz.is_contiguous());

    auto sizes = x.sizes();
    TORCH_CHECK(sizes.size() == 2);
    auto rows = sizes[0];
    auto cols = sizes[1];
    TORCH_CHECK(dz.dim() == 2);
    TORCH_CHECK(dz.size(1) == cols);
    auto hidden_size = gamma.numel();
    TORCH_CHECK(hidden_size == cols);

    // c10::IntArrayRef does not own the storage, so we need to construct a vector.
    // Otherwise just constructing IntArrayRef({blah}) will cause uninitialized memory because
    // blah is then deallocated.
    // When a subset of rows was fed through dropout in the forward, x0 (and dx0, dmask)
    // only cover x0_numrows rows.
    std::vector<int64_t> x0_sizes_vec {!x0_subset_.has_value() ? rows : x0_numrows, cols};
    auto x0_sizes = c10::IntArrayRef(x0_sizes_vec);

    if (dx_.has_value()) {
        auto dx = dx_.value();
        TORCH_CHECK(dx.dtype() == rtype);
        TORCH_CHECK(dx.is_cuda());
        TORCH_CHECK(dx.is_contiguous());
        TORCH_CHECK(dx.sizes() == sizes);
    }

    if (dmask_.has_value()) {
        auto dmask = dmask_.value();
        TORCH_CHECK(dmask.dtype() == mtype);
        TORCH_CHECK(dmask.is_cuda());
        TORCH_CHECK(dmask.is_contiguous());
        TORCH_CHECK(dmask.sizes() == x0_sizes);
    }

    if (rowscale_.has_value()) {
        auto rowscale = rowscale_.value();
        TORCH_CHECK(rowscale.is_cuda());
        TORCH_CHECK(rowscale.is_contiguous());
        TORCH_CHECK(rowscale.sizes() == c10::IntArrayRef{rows});
        TORCH_CHECK(rowscale.dtype() == itype);
    }

    if (colscale_.has_value()) {
        auto colscale = colscale_.value();
        TORCH_CHECK(colscale.is_cuda());
        TORCH_CHECK(colscale.is_contiguous());
        TORCH_CHECK(colscale.sizes() == c10::IntArrayRef{cols});
        TORCH_CHECK(colscale.dtype() == wtype);

        // d(colscale) needs the original input x0, so it must have been saved.
        TORCH_CHECK(x0_.has_value());
        auto x0 = x0_.value();
        TORCH_CHECK(x0.is_cuda());
        TORCH_CHECK(x0.is_contiguous());
        TORCH_CHECK(x0.sizes() == x0_sizes);
        TORCH_CHECK(x0.dtype() == itype);
    }

    if (x0_subset_.has_value()) {
        auto x0_subset = x0_subset_.value();
        TORCH_CHECK(x0_subset.is_cuda());
        TORCH_CHECK(x0_subset.is_contiguous());
        TORCH_CHECK(x0_subset.sizes() == c10::IntArrayRef{rows});
        TORCH_CHECK(x0_subset.dtype() == torch::kInt32);

        // Row subsets come in pairs: the output subset must accompany the input subset.
        TORCH_CHECK(z_subset_.has_value());
        auto z_subset = z_subset_.value();
        TORCH_CHECK(z_subset.is_cuda());
        TORCH_CHECK(z_subset.is_contiguous());
        TORCH_CHECK(z_subset.sizes() == c10::IntArrayRef{rows});
        TORCH_CHECK(z_subset.dtype() == torch::kInt32);
    }

    // Kernel instantiations exist only for multiples of 8 up to 8192 columns.
    TORCH_CHECK((hidden_size % 8 == 0) && (hidden_size <= 8192));

    TORCH_CHECK(mu.numel() == rows);
    TORCH_CHECK(mu.sizes() == rsigma.sizes());

    TORCH_CHECK(gamma.numel() == cols);

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{dz.device()};

    auto opts = x.options();

    // Gradient outputs.
    auto dx0 = torch::empty(x0_sizes, opts.dtype(itype));
    at::Tensor dresidual;
    if (has_residual) { dresidual = torch::empty_like(x, opts.dtype(rtype)); }
    auto dgamma = torch::empty_like(gamma);
    auto dbeta = torch::empty_like(gamma);
    at::Tensor dcolscale;
    if (colscale_.has_value()) {
        dcolscale = torch::empty_like(colscale_.value());
    }

    layer_norm::LaunchParams<layer_norm::BwdParams> launch_params;
    launch_params.stream = at::cuda::getCurrentCUDAStream().stream();
    launch_params.props = at::cuda::getCurrentDeviceProperties();
    TORCH_CHECK(dropout_p < 1.f);
    launch_params.params.dropout_keep_p = 1.f - dropout_p;
    launch_params.params.dresidual = has_residual ? dresidual.data_ptr() : nullptr;
    launch_params.params.rowscale = rowscale_.has_value() ? rowscale_.value().data_ptr() : nullptr;
    launch_params.params.colscale = colscale_.has_value() ? colscale_.value().data_ptr() : nullptr;
    launch_params.params.x0_subset = x0_subset_.has_value() ? x0_subset_.value().data_ptr() : nullptr;
    launch_params.params.z_subset = z_subset_.has_value() ? z_subset_.value().data_ptr() : nullptr;

    // Launchers are registered for rounded hidden sizes; round up to the bucket size.
    auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
    const int multiple = hidden_size <= 1536 ? 256 : (hidden_size <= 3072 ? 512 : 1024);
    auto launcher = get_bwd_launcher(wtype, itype, rtype, otype, ctype, round_multiple(hidden_size, multiple));

    // Configuration-only call: populates launch_params (ctas_per_col, barrier_size,
    // workspace_bytes) without running the kernel. Must precede the allocations below.
    launcher(launch_params, true);

    // Per-CTA partial weight gradients (finalized into dgamma/dbeta by the kernel).
    auto dgamma_part = torch::empty({ launch_params.params.ctas_per_col, hidden_size }, opts.dtype(ctype));
    auto dbeta_part = torch::empty({ launch_params.params.ctas_per_col, hidden_size }, opts.dtype(ctype));
    at::Tensor dcolscale_part;
    if (colscale_.has_value()) {
        dcolscale_part = torch::empty({ launch_params.params.ctas_per_col, hidden_size }, opts.dtype(ctype));
    }
    at::Tensor workspace, barrier;

    // Wire up the remaining kernel parameters.
    layer_norm::BwdParams &params = launch_params.params;
    params.rows = rows;
    params.cols = cols;
    params.x = x.data_ptr();
    params.x0 = x0_.has_value() ? x0_.value().data_ptr() : nullptr;
    params.dmask = dropout_p > 0.f ? dmask_.value().data_ptr() : nullptr;
    params.mu = mu.data_ptr();
    params.rs = rsigma.data_ptr();
    params.gamma = gamma.data_ptr();
    params.dz = dz.data_ptr();
    params.dx = dx_.has_value() ? dx_.value().data_ptr() : nullptr;
    params.dx0 = dx0.data_ptr();
    params.dbeta = dbeta.data_ptr();
    params.dgamma = dgamma.data_ptr();
    params.dcolscale = colscale_.has_value() ? dcolscale.data_ptr() : nullptr;
    params.dbeta_part = dbeta_part.data_ptr();
    params.dgamma_part = dgamma_part.data_ptr();
    params.dcolscale_part = colscale_.has_value() ? dcolscale_part.data_ptr() : nullptr;
    params.dropout_scale = 1.f / (1.f - dropout_p);
    params.inverse_cols = 1.f / float(params.cols);
    params.rowscale_const = rowscale_const;
    params.is_rms_norm = is_rms_norm;

    // Multi-CTA reductions need a zeroed inter-CTA barrier plus scratch workspace.
    if( launch_params.barrier_size > 0 ) {
        // TODO Any way to avoid this?
        barrier = torch::zeros(launch_params.barrier_size, opts.dtype(torch::kInt32));
        workspace = torch::empty(launch_params.workspace_bytes, opts.dtype(torch::kChar));
        params.workspace = workspace.data_ptr();
        params.barrier = barrier.data_ptr<int>();
    }

    // Actual kernel launch.
    launcher(launch_params, false);

    std::vector<at::Tensor> result = { dx0, dresidual, dgamma, dbeta, dgamma_part, dbeta_part };
    if (colscale_.has_value()) {
        result.push_back(dcolscale);
        result.push_back(dcolscale_part);
    }
    return result;
}
|
| 479 |
+
|
| 480 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 481 |
+
|
| 482 |
+
// Forward pass of fused Dropout + Add + LayerNorm for "parallel residual" architectures
// (e.g. two branches x0, x1 sharing one residual stream, optionally normalized by two
// independent (gamma, beta) pairs). Returns { z0, z1, x, dmask0, dmask1, mu, rsigma };
// z1/dmask1/x are undefined tensors when the corresponding option is absent.
std::vector<at::Tensor> dropout_add_ln_parallel_residual_fwd(
    const at::Tensor &x0,      // Input: BxSxhidden_size
    std::optional<const at::Tensor> &x1_,      // Input: BxSxhidden_size
    std::optional<const at::Tensor> &residual_,  // Residual: BxSxhidden_size
    const at::Tensor &gamma0,   // hidden_size
    std::optional<const at::Tensor> &beta0_,   // hidden_size
    std::optional<const at::Tensor> &gamma1_,   // hidden_size
    std::optional<const at::Tensor> &beta1_,   // hidden_size
    const float dropout_p,
    const float epsilon,
    std::optional<at::Generator> gen_,
    bool residual_in_fp32=false,
    bool is_rms_norm=false
) {
    // Dtype roles: itype = inputs/outputs, rtype = residual stream (fp32 when requested
    // and no residual tensor dictates it), wtype = weights, ctype = statistics (fp32),
    // mtype = dropout masks (uint8).
    auto itype = x0.scalar_type();
    auto rtype = residual_.has_value()
        ? residual_.value().scalar_type()
        : (residual_in_fp32 ? torch::kFloat32 : x0.scalar_type());
    auto wtype = gamma0.scalar_type();
    auto otype = itype;
    auto ctype = torch::kFloat32;
    auto mtype = torch::kUInt8;

    TORCH_CHECK(x0.is_cuda());
    TORCH_CHECK(gamma0.is_cuda());

    TORCH_CHECK(x0.is_contiguous());
    const auto sizes = x0.sizes();
    TORCH_CHECK(x0.dim() == 2);

    const int rows = sizes[0];
    const int cols = sizes[1];
    auto hidden_size = gamma0.numel();
    TORCH_CHECK(hidden_size == cols);

    if (x1_.has_value()) {
        auto x1 = x1_.value();
        TORCH_CHECK(x1.is_cuda());
        TORCH_CHECK(x1.is_contiguous());
        TORCH_CHECK(x1.sizes() == sizes);
    }

    if (residual_.has_value()) {
        auto residual = residual_.value();
        TORCH_CHECK(residual.is_cuda());
        TORCH_CHECK(residual.is_contiguous());
        TORCH_CHECK(residual.sizes() == sizes);
    }

    if (beta0_.has_value()) {
        auto beta0 = beta0_.value();
        TORCH_CHECK(beta0.dtype() == wtype);
        TORCH_CHECK(beta0.is_cuda());
        TORCH_CHECK(beta0.is_contiguous());
        TORCH_CHECK(beta0.sizes() == gamma0.sizes());
    }

    if (gamma1_.has_value()) {
        auto gamma1 = gamma1_.value();
        TORCH_CHECK(gamma1.dtype() == wtype);
        TORCH_CHECK(gamma1.is_cuda());
        TORCH_CHECK(gamma1.is_contiguous());
        TORCH_CHECK(gamma1.sizes() == gamma0.sizes());
    }

    if (beta1_.has_value()) {
        auto beta1 = beta1_.value();
        TORCH_CHECK(beta1.dtype() == wtype);
        TORCH_CHECK(beta1.is_cuda());
        TORCH_CHECK(beta1.is_contiguous());
        TORCH_CHECK(beta1.sizes() == gamma0.sizes());
    }

    // Kernel instantiations exist only for multiples of 8 up to 8192 columns.
    TORCH_CHECK((hidden_size % 8 == 0) && (hidden_size <= 8192));
    TORCH_CHECK(epsilon >= 0.f);

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{x0.device()};

    auto opts = x0.options();

    // The pre-normalization sum x is only materialized when the backward will need it.
    bool save_x = residual_.has_value() || x1_.has_value() || (dropout_p > 0.f) || (itype != rtype);
    at::Tensor x;
    if (save_x) { x = torch::empty(sizes, opts.dtype(rtype)); }
    at::Tensor dmask0, dmask1;
    if (dropout_p > 0.f) {
        dmask0 = torch::empty(x0.sizes(), opts.dtype(mtype));
        if (x1_.has_value()) { dmask1 = torch::empty(x0.sizes(), opts.dtype(mtype)); }
    };
    auto z0 = torch::empty(sizes, opts.dtype(otype));
    at::Tensor z1;
    if (gamma1_.has_value()) { z1 = torch::empty(sizes, opts.dtype(otype)); }

    // Row-wise statistics (always fp32).
    auto mu = torch::empty({ rows }, opts.dtype(ctype));
    auto rsigma = torch::empty({ rows }, opts.dtype(ctype));

    layer_norm::LaunchParams<layer_norm::FwdParams> launch_params;

    launch_params.props = at::cuda::getCurrentDeviceProperties();
    launch_params.stream = at::cuda::getCurrentCUDAStream().stream();
    TORCH_CHECK(dropout_p < 1.f);
    launch_params.params.dropout_keep_p = 1.f - dropout_p;
    launch_params.params.residual = residual_.has_value() ? residual_.value().data_ptr() : nullptr;

    auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
        gen_, at::cuda::detail::getDefaultCUDAGenerator());

    // Launchers are registered for rounded hidden sizes; round up to the bucket size.
    auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
    const int multiple = hidden_size <= 1536 ? 256 : (hidden_size <= 3072 ? 512 : 1024);
    // Request the kernel launcher.
    auto launcher = get_parallel_fwd_launcher(wtype, itype, rtype, otype, ctype, round_multiple(hidden_size, multiple));

    // Set the kernel runtime parameters.
    layer_norm::FwdParams &params = launch_params.params;
    params.rows = rows;
    params.cols = cols;
    params.x0 = x0.data_ptr();
    params.x1 = x1_.has_value() ? x1_.value().data_ptr() : nullptr;
    params.x = save_x ? x.data_ptr() : nullptr;
    params.dmask = dropout_p > 0.f ? dmask0.data_ptr() : nullptr;
    params.dmask1 = (dropout_p > 0.f && x1_.has_value()) ? dmask1.data_ptr() : nullptr;
    params.mu = mu.data_ptr();
    params.rs = rsigma.data_ptr();
    params.gamma = gamma0.data_ptr();
    params.gamma1 = gamma1_.has_value() ? gamma1_.value().data_ptr() : nullptr;
    params.beta = beta0_.has_value() ? beta0_.value().data_ptr() : nullptr;
    params.beta1 = beta1_.has_value() ? beta1_.value().data_ptr() : nullptr;
    params.z = z0.data_ptr();
    params.z1 = gamma1_.has_value() ? z1.data_ptr() : nullptr;
    params.epsilon = epsilon;
    params.dropout_scale = 1.f / (1.f - dropout_p);
    params.inverse_cols = 1.f / float(params.cols);
    params.is_rms_norm = is_rms_norm;

    // Query the kernel-specific launch parameters.
    launcher(launch_params, true);

    at::Tensor workspace, barrier;

    if (dropout_p > 0.f) {
        // number of times random will be generated per thread, to offset philox counter in thc random
        // state. Factor 2 because up to two inputs (x0 and x1) are dropped out.
        int64_t counter_offset = 2 * launch_params.elts_per_thread;

        // See Note [Acquire lock when using random generators]
        {
            std::lock_guard<std::mutex> lock(gen->mutex_);
            params.philox_args = gen->philox_cuda_state(counter_offset);
        }
    }

    // Multi-CTA configurations need a zeroed inter-CTA barrier plus scratch workspace.
    if( launch_params.barrier_size > 0 ) {
        auto options = x0.options();
        barrier = torch::zeros(launch_params.barrier_size, options.dtype(torch::kInt32));
        workspace = torch::empty(launch_params.workspace_bytes, options.dtype(torch::kChar));
        params.workspace = workspace.data_ptr();
        params.barrier = barrier.data_ptr<int>();
    }

    // Launch the kernel.
    launcher(launch_params, false);

    return { z0, z1, x, dmask0, dmask1, mu, rsigma };
}
|
| 646 |
+
|
| 647 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 648 |
+
|
| 649 |
+
// Backward pass of fused Dropout + Add + LayerNorm with parallel residual branches.
// Consumes dz0 (and optionally dz1 for the second normalization) plus the saved
// forward statistics, and returns
// { dx0, dx1, dresidual, dgamma0, dbeta0, dgamma1, dbeta1,
//   dgamma0_part, dbeta0_part, dgamma1_part, dbeta1_part };
// entries tied to absent options are undefined tensors. The *_part tensors are
// per-CTA partial reductions of the weight gradients.
//
// Fix vs. original: the `TORCH_CHECK(dz0.dtype() == otype)` assertion was duplicated
// verbatim (copy-paste); the redundant second copy is removed.
std::vector<at::Tensor> dropout_add_ln_parallel_residual_bwd(
    const at::Tensor &dz0,     // BxSxhidden_size
    std::optional<const at::Tensor> &dz1_,     // BxSxhidden_size
    std::optional<const at::Tensor> &dx_,     // BxSxhidden_size
    const at::Tensor &x,      // BxSxhidden_size
    std::optional<const at::Tensor> &dmask0_,  // BxSxhidden_size
    std::optional<const at::Tensor> &dmask1_,  // BxSxhidden_size
    const at::Tensor &mu,     // BxS, FP32!
    const at::Tensor &rsigma, // BxS, FP32!
    const at::Tensor &gamma0,   // hidden_size
    std::optional<const at::Tensor> &gamma1_,   // hidden_size
    const float dropout_p,
    const bool has_x1,
    const bool has_residual,
    bool is_rms_norm=false
) {

    // Dtype roles: itype = input/output grads, rtype = residual stream, wtype = weights,
    // ctype = statistics (fp32), mtype = dropout masks (uint8).
    auto itype = dz0.scalar_type();
    auto rtype = x.scalar_type();
    auto wtype = gamma0.scalar_type();
    auto otype = itype;
    auto ctype = torch::kFloat32;
    auto mtype = torch::kUInt8;

    // The dropout mask saved by the forward is required to invert dropout.
    if (dropout_p > 0.f) { TORCH_CHECK(dmask0_.has_value()); }

    TORCH_CHECK(dz0.dtype() == otype);
    TORCH_CHECK(mu.dtype() == ctype);
    TORCH_CHECK(rsigma.dtype() == ctype);

    TORCH_CHECK(x.is_cuda());
    TORCH_CHECK(dz0.is_cuda());
    TORCH_CHECK(mu.is_cuda());
    TORCH_CHECK(rsigma.is_cuda());
    TORCH_CHECK(gamma0.is_cuda());

    TORCH_CHECK(x.is_contiguous());
    TORCH_CHECK(dz0.is_contiguous());

    auto sizes = x.sizes();
    TORCH_CHECK(sizes.size() == 2);
    auto rows = sizes[0];
    auto cols = sizes[1];
    TORCH_CHECK(dz0.dim() == 2);
    TORCH_CHECK(dz0.size(1) == cols);
    auto hidden_size = gamma0.numel();
    TORCH_CHECK(hidden_size == cols);

    if (dz1_.has_value()) {
        auto dz1 = dz1_.value();
        TORCH_CHECK(dz1.dtype() == otype);
        TORCH_CHECK(dz1.is_cuda());
        TORCH_CHECK(dz1.is_contiguous());
        TORCH_CHECK(dz1.sizes() == sizes);

        // A second output gradient implies a second set of norm weights.
        TORCH_CHECK(gamma1_.has_value());
        auto gamma1 = gamma1_.value();
        TORCH_CHECK(gamma1.dtype() == wtype);
        TORCH_CHECK(gamma1.is_cuda());
        TORCH_CHECK(gamma1.is_contiguous());
        TORCH_CHECK(gamma1.sizes() == gamma0.sizes());
    }

    if (dx_.has_value()) {
        auto dx = dx_.value();
        TORCH_CHECK(dx.dtype() == rtype);
        TORCH_CHECK(dx.is_cuda());
        TORCH_CHECK(dx.is_contiguous());
        TORCH_CHECK(dx.sizes() == sizes);
    }

    if (dmask0_.has_value()) {
        auto dmask0 = dmask0_.value();
        TORCH_CHECK(dmask0.dtype() == mtype);
        TORCH_CHECK(dmask0.is_cuda());
        TORCH_CHECK(dmask0.is_contiguous());
        TORCH_CHECK(dmask0.sizes() == sizes);

        // If a second branch went through dropout, its mask must be present too.
        if (has_x1) {
            TORCH_CHECK(dmask1_.has_value());
            auto dmask1 = dmask1_.value();
            TORCH_CHECK(dmask1.dtype() == mtype);
            TORCH_CHECK(dmask1.is_cuda());
            TORCH_CHECK(dmask1.is_contiguous());
            TORCH_CHECK(dmask1.sizes() == sizes);
        }
    }

    // Kernel instantiations exist only for multiples of 8 up to 8192 columns.
    TORCH_CHECK((hidden_size % 8 == 0) && (hidden_size <= 8192));

    TORCH_CHECK(mu.numel() == rows);
    TORCH_CHECK(mu.sizes() == rsigma.sizes());

    // Otherwise the kernel will be launched from cuda:0 device
    at::cuda::CUDAGuard device_guard{dz0.device()};

    auto opts = x.options();

    // Gradient outputs.
    auto dx0 = torch::empty(sizes, opts.dtype(itype));
    at::Tensor dx1;
    if (has_x1) { dx1 = torch::empty(sizes, opts.dtype(itype)); }
    at::Tensor dresidual;
    if (has_residual) { dresidual = torch::empty_like(x, opts.dtype(rtype)); }
    auto dgamma0 = torch::empty_like(gamma0);
    auto dbeta0 = torch::empty_like(gamma0);
    at::Tensor dgamma1, dbeta1;
    if (gamma1_.has_value()) {
        dgamma1 = torch::empty_like(gamma0);
        dbeta1 = torch::empty_like(gamma0);
    }

    layer_norm::LaunchParams<layer_norm::BwdParams> launch_params;
    launch_params.stream = at::cuda::getCurrentCUDAStream().stream();
    launch_params.props = at::cuda::getCurrentDeviceProperties();
    TORCH_CHECK(dropout_p < 1.f);
    launch_params.params.dropout_keep_p = 1.f - dropout_p;
    launch_params.params.dresidual = has_residual ? dresidual.data_ptr() : nullptr;

    // Launchers are registered for rounded hidden sizes; round up to the bucket size.
    auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
    const int multiple = hidden_size <= 1536 ? 256 : (hidden_size <= 3072 ? 512 : 1024);
    auto launcher = get_parallel_bwd_launcher(wtype, itype, rtype, otype, ctype, round_multiple(hidden_size, multiple));

    // Configuration-only call: populates launch_params (ctas_per_col, barrier_size,
    // workspace_bytes) without running the kernel. Must precede the allocations below.
    launcher(launch_params, true);

    // Per-CTA partial weight gradients (finalized into dgamma*/dbeta* by the kernel).
    auto dgamma0_part = torch::zeros({ launch_params.params.ctas_per_col, hidden_size }, opts.dtype(ctype));
    auto dbeta0_part = torch::zeros({ launch_params.params.ctas_per_col, hidden_size }, opts.dtype(ctype));
    at::Tensor dgamma1_part, dbeta1_part;
    if (gamma1_.has_value()) {
        dgamma1_part = torch::zeros_like(dgamma0_part);
        dbeta1_part = torch::zeros_like(dbeta0_part);
    }
    at::Tensor workspace, barrier;

    // Wire up the remaining kernel parameters.
    layer_norm::BwdParams &params = launch_params.params;
    params.rows = rows;
    params.cols = cols;
    params.x = x.data_ptr();
    params.dmask = dropout_p > 0.f ? dmask0_.value().data_ptr() : nullptr;
    params.dmask1 = (dropout_p > 0.f && has_x1) ? dmask1_.value().data_ptr() : nullptr;
    params.mu = mu.data_ptr();
    params.rs = rsigma.data_ptr();
    params.gamma = gamma0.data_ptr();
    params.gamma1 = gamma1_.has_value() ? gamma1_.value().data_ptr() : nullptr;
    params.dz = dz0.data_ptr();
    params.dz1 = dz1_.has_value() ? dz1_.value().data_ptr() : nullptr;
    params.dx = dx_.has_value() ? dx_.value().data_ptr() : nullptr;
    params.dx0 = dx0.data_ptr();
    params.dx1 = has_x1 ? dx1.data_ptr() : nullptr;
    params.dbeta = dbeta0.data_ptr();
    params.dgamma = dgamma0.data_ptr();
    params.dbeta1 = gamma1_.has_value() ? dbeta1.data_ptr() : nullptr;
    params.dgamma1 = gamma1_.has_value() ? dgamma1.data_ptr() : nullptr;
    params.dbeta_part = dbeta0_part.data_ptr();
    params.dgamma_part = dgamma0_part.data_ptr();
    params.dbeta1_part = gamma1_.has_value() ? dbeta1_part.data_ptr() : nullptr;
    params.dgamma1_part = gamma1_.has_value() ? dgamma1_part.data_ptr() : nullptr;
    params.dropout_scale = 1.f / (1.f - dropout_p);
    params.inverse_cols = 1.f / float(params.cols);
    params.is_rms_norm = is_rms_norm;

    // Multi-CTA reductions need a zeroed inter-CTA barrier plus scratch workspace.
    if( launch_params.barrier_size > 0 ) {
        // TODO Any way to avoid this?
        barrier = torch::zeros(launch_params.barrier_size, opts.dtype(torch::kInt32));
        workspace = torch::empty(launch_params.workspace_bytes, opts.dtype(torch::kChar));
        params.workspace = workspace.data_ptr();
        params.barrier = barrier.data_ptr<int>();
    }

    // Actual kernel launch.
    launcher(launch_params, false);

    std::vector<at::Tensor> result = { dx0, dx1, dresidual, dgamma0, dbeta0, dgamma1, dbeta1, dgamma0_part, dbeta0_part, dgamma1_part, dbeta1_part };
    return result;
}
|
| 823 |
+
|
| 824 |
+
////////////////////////////////////////////////////////////////////////////////////////////////////
|
| 825 |
+
|
| 826 |
+
// Python bindings for the fused dropout + add + layer-norm CUDA kernels.
// Argument names here must match the keyword names used by the Python wrappers.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.doc() = "CUDA DropoutAddLayerNorm";
    // Forward: dropout(x0 * rowscale * colscale) + residual, then LayerNorm/RMSNorm.
    m.def("dropout_add_ln_fwd", &dropout_add_ln_fwd, "Run Dropout + Add + LayerNorm forward kernel",
          py::arg("x0"), py::arg("residual"), py::arg("gamma"), py::arg("beta_"),
          py::arg("rowscale_"), py::arg("colscale_"), py::arg("x0_subset_"), py::arg("z_subset_"),
          py::arg("dropout_p"), py::arg("epsilon"), py::arg("rowscale_const"), py::arg("z_numrows"),
          py::arg("gen_"), py::arg("residual_in_fp32")=false, py::arg("is_rms_norm")=false);
    // Backward of the above; consumes saved statistics (mu, rsigma) and dropout mask.
    m.def("dropout_add_ln_bwd", &dropout_add_ln_bwd, "Run Dropout + Add + LayerNorm backward kernel",
          py::arg("dz"), py::arg("dx_"), py::arg("x"), py::arg("x0_"), py::arg("dmask_"), py::arg("mu"),
          py::arg("rsigma"), py::arg("gamma"), py::arg("rowscale_"), py::arg("colscale_"),
          py::arg("x0_subset_"), py::arg("z_subset_"), py::arg("dropout_p"), py::arg("rowscale_const"),
          py::arg("x0_numrows"), py::arg("has_residual"), py::arg("is_rms_norm")=false);
    // Parallel-residual variant: two input branches (x0, x1) and up to two norms.
    m.def("dropout_add_ln_parallel_residual_fwd", &dropout_add_ln_parallel_residual_fwd, "Run Dropout + Add + LayerNorm parallel residual forward kernel",
          py::arg("x0"), py::arg("x1_"), py::arg("residual"), py::arg("gamma0"), py::arg("beta0_"),
          py::arg("gamma1_"), py::arg("beta1_"), py::arg("dropout_p"), py::arg("epsilon"),
          py::arg("gen_"), py::arg("residual_in_fp32")=false, py::arg("is_rms_norm")=false);
    // Backward of the parallel-residual variant.
    m.def("dropout_add_ln_parallel_residual_bwd", &dropout_add_ln_parallel_residual_bwd, "Run Dropout + Add + LayerNorm parallel residual backward kernel",
          py::arg("dz0"), py::arg("dz1_"), py::arg("dx_"), py::arg("x"), py::arg("dmask0_"),
          py::arg("dmask1_"), py::arg("mu"), py::arg("rsigma"), py::arg("gamma0"), py::arg("gamma1_"),
          py::arg("dropout_p"), py::arg("has_x1"), py::arg("has_residual"), py::arg("is_rms_norm")=false);
}
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_1024.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 1024, fp32, fp32, fp32, fp32, fp32, 1, 4, 1, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 1024, fp16, fp32, fp32, fp32, fp32, 1, 4, 1, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 1024, fp32, fp16, fp32, fp16, fp32, 1, 4, 1, 16, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 1024, fp16, fp16, fp32, fp16, fp32, 1, 4, 1, 16, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 1024, fp32, fp16, fp16, fp16, fp32, 1, 4, 1, 16, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 1024, fp32, bf16, fp32, bf16, fp32, 1, 4, 1, 16, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 1024, bf16, bf16, fp32, bf16, fp32, 1, 4, 1, 16, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 1024, fp32, bf16, bf16, bf16, fp32, 1, 4, 1, 16, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 1024, fp16, fp16, fp16, fp16, fp32, 1, 4, 1, 16, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 1024, bf16, bf16, bf16, bf16, fp32, 1, 4, 1, 16, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_1280.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 1280, fp32, fp32, fp32, fp32, fp32, 1, 4, 1, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 1280, fp16, fp32, fp32, fp32, fp32, 1, 4, 1, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 1280, fp32, fp16, fp32, fp16, fp32, 1, 4, 1, 16, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 1280, fp16, fp16, fp32, fp16, fp32, 1, 4, 1, 16, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 1280, fp32, fp16, fp16, fp16, fp32, 1, 4, 1, 16, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 1280, fp32, bf16, fp32, bf16, fp32, 1, 4, 1, 16, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 1280, bf16, bf16, fp32, bf16, fp32, 1, 4, 1, 16, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 1280, fp32, bf16, bf16, bf16, fp32, 1, 4, 1, 16, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 1280, fp16, fp16, fp16, fp16, fp32, 1, 4, 1, 16, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 1280, bf16, bf16, bf16, bf16, fp32, 1, 4, 1, 16, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_2048.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 2048, fp32, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 2048, fp16, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 2048, fp32, fp16, fp32, fp16, fp32, 1, 1, 4, 16, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 2048, fp16, fp16, fp32, fp16, fp32, 1, 1, 4, 16, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 2048, fp32, fp16, fp16, fp16, fp32, 1, 1, 4, 16, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 2048, fp32, bf16, fp32, bf16, fp32, 1, 1, 4, 16, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 2048, bf16, bf16, fp32, bf16, fp32, 1, 1, 4, 16, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 2048, fp32, bf16, bf16, bf16, fp32, 1, 1, 4, 16, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 2048, fp16, fp16, fp16, fp16, fp32, 1, 1, 4, 16, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 2048, bf16, bf16, bf16, bf16, fp32, 1, 1, 4, 16, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_2560.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 2560, fp32, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 2560, fp16, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 2560, fp32, fp16, fp32, fp16, fp32, 1, 1, 4, 8, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 2560, fp16, fp16, fp32, fp16, fp32, 1, 1, 4, 8, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 2560, fp32, fp16, fp16, fp16, fp32, 1, 1, 4, 8, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 2560, fp32, bf16, fp32, bf16, fp32, 1, 1, 4, 8, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 2560, bf16, bf16, fp32, bf16, fp32, 1, 1, 4, 8, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 2560, fp32, bf16, bf16, bf16, fp32, 1, 1, 4, 8, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 2560, fp16, fp16, fp16, fp16, fp32, 1, 1, 4, 8, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 2560, bf16, bf16, bf16, bf16, fp32, 1, 1, 4, 8, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_4096.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 4096, fp32, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 4096, fp16, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 4096, fp32, fp16, fp32, fp16, fp32, 1, 1, 4, 16, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 4096, fp16, fp16, fp32, fp16, fp32, 1, 1, 4, 16, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 4096, fp32, fp16, fp16, fp16, fp32, 1, 1, 4, 16, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 4096, fp32, bf16, fp32, bf16, fp32, 1, 1, 4, 16, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 4096, bf16, bf16, fp32, bf16, fp32, 1, 1, 4, 16, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 4096, fp32, bf16, bf16, bf16, fp32, 1, 1, 4, 16, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 4096, fp16, fp16, fp16, fp16, fp32, 1, 1, 4, 16, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 4096, bf16, bf16, bf16, bf16, fp32, 1, 1, 4, 16, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_5120.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 5120, fp32, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 5120, fp16, fp32, fp32, fp32, fp32, 1, 1, 4, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 5120, fp32, fp16, fp32, fp16, fp32, 1, 1, 4, 16, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 5120, fp16, fp16, fp32, fp16, fp32, 1, 1, 4, 16, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 5120, fp32, fp16, fp16, fp16, fp32, 1, 1, 4, 16, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 5120, fp32, bf16, fp32, bf16, fp32, 1, 1, 4, 16, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 5120, bf16, bf16, fp32, bf16, fp32, 1, 1, 4, 16, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 5120, fp32, bf16, bf16, bf16, fp32, 1, 1, 4, 16, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 5120, fp16, fp16, fp16, fp16, fp32, 1, 1, 4, 16, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 5120, bf16, bf16, bf16, bf16, fp32, 1, 1, 4, 16, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_6144.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 6144, fp32, fp32, fp32, fp32, fp32, 1, 1, 8, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 6144, fp16, fp32, fp32, fp32, fp32, 1, 1, 8, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 6144, fp32, fp16, fp32, fp16, fp32, 1, 1, 8, 16, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 6144, fp16, fp16, fp32, fp16, fp32, 1, 1, 8, 16, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 6144, fp32, fp16, fp16, fp16, fp32, 1, 1, 8, 16, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 6144, fp32, bf16, fp32, bf16, fp32, 1, 1, 8, 16, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 6144, bf16, bf16, fp32, bf16, fp32, 1, 1, 8, 16, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 6144, fp32, bf16, bf16, bf16, fp32, 1, 1, 8, 16, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 6144, fp16, fp16, fp16, fp16, fp32, 1, 1, 8, 16, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 6144, bf16, bf16, bf16, bf16, fp32, 1, 1, 8, 16, 4);
|
Code/Baselines/flash-attention/csrc/layer_norm/ln_bwd_7168.cu
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#include "ln_bwd_kernels.cuh"
|
| 2 |
+
|
| 3 |
+
// Create backward launch function and register. Macro signature:
|
| 4 |
+
// HIDDEN_SIZE, WTYPE, ITYPE, RTYPE, OTYPE, CTYPE, CTAS_PER_ROW, WARPS_M, WARPS_N, BYTES_PER_LDG, BYTES_PER_LDG_FINAL
|
| 5 |
+
|
| 6 |
+
REGISTER_BWD_LAUNCHER( 7168, fp32, fp32, fp32, fp32, fp32, 1, 1, 8, 16, 4);
|
| 7 |
+
REGISTER_BWD_LAUNCHER( 7168, fp16, fp32, fp32, fp32, fp32, 1, 1, 8, 16, 4);
|
| 8 |
+
REGISTER_BWD_LAUNCHER( 7168, fp32, fp16, fp32, fp16, fp32, 1, 1, 8, 8, 4);
|
| 9 |
+
REGISTER_BWD_LAUNCHER( 7168, fp16, fp16, fp32, fp16, fp32, 1, 1, 8, 8, 4);
|
| 10 |
+
REGISTER_BWD_LAUNCHER( 7168, fp32, fp16, fp16, fp16, fp32, 1, 1, 8, 8, 4);
|
| 11 |
+
REGISTER_BWD_LAUNCHER( 7168, fp32, bf16, fp32, bf16, fp32, 1, 1, 8, 8, 4);
|
| 12 |
+
REGISTER_BWD_LAUNCHER( 7168, bf16, bf16, fp32, bf16, fp32, 1, 1, 8, 8, 4);
|
| 13 |
+
REGISTER_BWD_LAUNCHER( 7168, fp32, bf16, bf16, bf16, fp32, 1, 1, 8, 8, 4);
|
| 14 |
+
REGISTER_BWD_LAUNCHER( 7168, fp16, fp16, fp16, fp16, fp32, 1, 1, 8, 8, 4);
|
| 15 |
+
REGISTER_BWD_LAUNCHER( 7168, bf16, bf16, bf16, bf16, fp32, 1, 1, 8, 8, 4);
|