Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- Code/Baselines/flash-attention/csrc/composable_kernel/.github/ISSUE_TEMPLATE/config.yml +1 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/.github/ISSUE_TEMPLATE/issue_report.yml +221 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/conceptual/Composable-Kernel-math.rst +77 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/conceptual/Composable-Kernel-structure.rst +29 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/doxygen/Doxyfile +0 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/install/Composable-Kernel-Docker.rst +16 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/install/Composable-Kernel-install.rst +72 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/install/Composable-Kernel-prerequisites.rst +32 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable-Kernel-wrapper.rst +89 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable_Kernel_custom_types.rst +39 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable_Kernel_supported_scalar_types.rst +69 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable_Kernel_vector_utilities.rst +16 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/sphinx/_toc.yml.in +45 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/sphinx/requirements.in +2 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/sphinx/requirements.txt +335 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/docs/tutorial/Composable-Kernel-examples.rst +40 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/data_type_enum.hpp +21 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_avg_pool2d_bwd_impl.hpp +255 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_b_scale_impl.hpp +488 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_gemm_impl.hpp +319 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_softmax_gemm_impl.hpp +347 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_softmax_gemm_permute_impl.hpp +367 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batchnorm_backward_impl.hpp +390 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batchnorm_forward_impl.hpp +412 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_contraction_utils.hpp +83 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_conv_bwd_data_impl.hpp +271 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_conv_fwd_impl.hpp +244 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_elementwise_layernorm_impl.hpp +277 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_ab_scale_impl.hpp +363 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_fastgelu_impl.hpp +232 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_impl.hpp +232 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_relu_add_layernorm_impl.hpp +349 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_relu_impl.hpp +232 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_bias_add_reduce_impl.hpp +384 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_blockscale_wp_impl.hpp +415 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_multiply_add_impl.hpp +242 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_multiply_multiply_impl.hpp +359 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_mx_impl.hpp +534 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_reduce_impl.hpp +353 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_splitk_impl.hpp +302 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_universal_batched_impl.hpp +292 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_universal_impl.hpp +413 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_universal_reduce_impl.hpp +323 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_conv_fwd_bias_clamp_impl.hpp +323 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_conv_fwd_impl.hpp +261 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_conv_fwd_outelementop_impl.hpp +352 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_gemm_fastgelu_impl.hpp +280 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_gemm_fixed_nk_impl.hpp +374 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_gemm_impl.hpp +379 -0
- Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_groupnorm_fwd_impl.hpp +274 -0
Code/Baselines/flash-attention/csrc/composable_kernel/.github/ISSUE_TEMPLATE/config.yml
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
blank_issues_enabled: true
|
Code/Baselines/flash-attention/csrc/composable_kernel/.github/ISSUE_TEMPLATE/issue_report.yml
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Issue Report
|
| 2 |
+
description: File a report for ROCm related issues on Linux and Windows. For issues pertaining to documentation or non-bug related, please open a blank issue located below.
|
| 3 |
+
title: "[Issue]: "
|
| 4 |
+
|
| 5 |
+
body:
|
| 6 |
+
- type: markdown
|
| 7 |
+
attributes:
|
| 8 |
+
value: |
|
| 9 |
+
Thank you for taking the time to fill out this report!
|
| 10 |
+
|
| 11 |
+
You can acquire your OS, CPU, GPU (for filling out this report) with the following commands:
|
| 12 |
+
|
| 13 |
+
Linux:
|
| 14 |
+
echo "OS:" && cat /etc/os-release | grep -E "^(NAME=|VERSION=)";
|
| 15 |
+
echo "CPU: " && cat /proc/cpuinfo | grep "model name" | sort --unique;
|
| 16 |
+
echo "GPU:" && /opt/rocm/bin/rocminfo | grep -E "^\s*(Name|Marketing Name)";
|
| 17 |
+
|
| 18 |
+
Windows:
|
| 19 |
+
(Get-WmiObject Win32_OperatingSystem).Version
|
| 20 |
+
(Get-WmiObject win32_Processor).Name
|
| 21 |
+
(Get-WmiObject win32_VideoController).Name
|
| 22 |
+
- type: textarea
|
| 23 |
+
attributes:
|
| 24 |
+
label: Problem Description
|
| 25 |
+
description: Describe the issue you encountered.
|
| 26 |
+
validations:
|
| 27 |
+
required: true
|
| 28 |
+
- type: input
|
| 29 |
+
attributes:
|
| 30 |
+
label: Operating System
|
| 31 |
+
description: What is the name and version number of the OS?
|
| 32 |
+
placeholder: "e.g. Ubuntu 22.04.3 LTS (Jammy Jellyfish)"
|
| 33 |
+
validations:
|
| 34 |
+
required: true
|
| 35 |
+
- type: input
|
| 36 |
+
attributes:
|
| 37 |
+
label: CPU
|
| 38 |
+
description: What CPU did you encounter the issue on?
|
| 39 |
+
placeholder: "e.g. AMD Ryzen 9 5900HX with Radeon Graphics"
|
| 40 |
+
validations:
|
| 41 |
+
required: true
|
| 42 |
+
- type: dropdown
|
| 43 |
+
attributes:
|
| 44 |
+
label: GPU
|
| 45 |
+
description: What GPU(s) did you encounter the issue on (you can select multiple GPUs from the list)
|
| 46 |
+
multiple: true
|
| 47 |
+
options:
|
| 48 |
+
- AMD Instinct MI300X
|
| 49 |
+
- AMD Instinct MI300A
|
| 50 |
+
- AMD Instinct MI300
|
| 51 |
+
- AMD Instinct MI250X
|
| 52 |
+
- AMD Instinct MI250
|
| 53 |
+
- AMD Instinct MI210
|
| 54 |
+
- AMD Instinct MI100
|
| 55 |
+
- AMD Instinct MI50
|
| 56 |
+
- AMD Instinct MI25
|
| 57 |
+
- AMD Radeon Pro V620
|
| 58 |
+
- AMD Radeon Pro VII
|
| 59 |
+
- AMD Radeon RX 7900 XTX
|
| 60 |
+
- AMD Radeon VII
|
| 61 |
+
- AMD Radeon Pro W7900
|
| 62 |
+
- AMD Radeon Pro W7800
|
| 63 |
+
- AMD Radeon Pro W6800
|
| 64 |
+
- AMD Radeon Pro W6600
|
| 65 |
+
- AMD Radeon Pro W5500
|
| 66 |
+
- AMD Radeon RX 7900 XT
|
| 67 |
+
- AMD Radeon RX 7600
|
| 68 |
+
- AMD Radeon RX 6950 XT
|
| 69 |
+
- AMD Radeon RX 6900 XT
|
| 70 |
+
- AMD Radeon RX 6800 XT
|
| 71 |
+
- AMD Radeon RX 6800
|
| 72 |
+
- AMD Radeon RX 6750
|
| 73 |
+
- AMD Radeon RX 6700 XT
|
| 74 |
+
- AMD Radeon RX 6700
|
| 75 |
+
- AMD Radeon RX 6650 XT
|
| 76 |
+
- AMD Radeon RX 6600 XT
|
| 77 |
+
- AMD Radeon RX 6600
|
| 78 |
+
- Other
|
| 79 |
+
validations:
|
| 80 |
+
required: true
|
| 81 |
+
- type: input
|
| 82 |
+
attributes:
|
| 83 |
+
label: Other
|
| 84 |
+
description: If you selected Other, please specify
|
| 85 |
+
- type: dropdown
|
| 86 |
+
attributes:
|
| 87 |
+
label: ROCm Version
|
| 88 |
+
description: What version(s) of ROCm did you encounter the issue on?
|
| 89 |
+
multiple: true
|
| 90 |
+
options:
|
| 91 |
+
- ROCm 6.0.0
|
| 92 |
+
- ROCm 5.7.1
|
| 93 |
+
- ROCm 5.7.0
|
| 94 |
+
- ROCm 5.6.1
|
| 95 |
+
- ROCm 5.6.0
|
| 96 |
+
- ROCm 5.5.1
|
| 97 |
+
- ROCm 5.5.0
|
| 98 |
+
validations:
|
| 99 |
+
required: true
|
| 100 |
+
- type: dropdown
|
| 101 |
+
attributes:
|
| 102 |
+
label: ROCm Component
|
| 103 |
+
description: (Optional) If this issue relates to a specific ROCm component, it can be mentioned here.
|
| 104 |
+
multiple: true
|
| 105 |
+
options:
|
| 106 |
+
- Other
|
| 107 |
+
- AMD Common Language Runtime
|
| 108 |
+
- AMD MIGraphX
|
| 109 |
+
- AMD System Management Interface
|
| 110 |
+
- amdgpu KCL/autoconf
|
| 111 |
+
- amdgpu Kernel-mode GPU Driver
|
| 112 |
+
- amdgpu-install
|
| 113 |
+
- AOMP
|
| 114 |
+
- AOMP Extras
|
| 115 |
+
- AqlProfile
|
| 116 |
+
- build-infra
|
| 117 |
+
- chelsio
|
| 118 |
+
- clang-ocl
|
| 119 |
+
- Composable Kernel
|
| 120 |
+
- dkms
|
| 121 |
+
- docker / ROCm-docker
|
| 122 |
+
- flang
|
| 123 |
+
- gpuburn
|
| 124 |
+
- half
|
| 125 |
+
- HIP
|
| 126 |
+
- HIP Examples
|
| 127 |
+
- hipBLAS
|
| 128 |
+
- hipBLASLt
|
| 129 |
+
- HIPCC
|
| 130 |
+
- hipCUB
|
| 131 |
+
- hip-examples-private
|
| 132 |
+
- hipFFT
|
| 133 |
+
- hipfort
|
| 134 |
+
- HIPIFY
|
| 135 |
+
- hipRAND
|
| 136 |
+
- hipSOLVER
|
| 137 |
+
- hipSPARSE
|
| 138 |
+
- hipSPARSELt
|
| 139 |
+
- hipTensor
|
| 140 |
+
- hip-tests
|
| 141 |
+
- HSA Runtime
|
| 142 |
+
- infrastructure
|
| 143 |
+
- jenkins-utils
|
| 144 |
+
- libdrm
|
| 145 |
+
- Linux BPI packaging framework
|
| 146 |
+
- llvm-project
|
| 147 |
+
- Mesa
|
| 148 |
+
- meta
|
| 149 |
+
- MIOpen
|
| 150 |
+
- MIVisionX
|
| 151 |
+
- ml-framework-ci
|
| 152 |
+
- MLSEQA_TestRepo
|
| 153 |
+
- OpenCL API C++ Bindings
|
| 154 |
+
- OpenCL API Headers
|
| 155 |
+
- OpenCL Conformance Test Suite
|
| 156 |
+
- OpenCL ICD Loader
|
| 157 |
+
- perftest-p2p
|
| 158 |
+
- prototype
|
| 159 |
+
- RCCL
|
| 160 |
+
- rccl-rdma-sharp-plugins
|
| 161 |
+
- rocALUTION
|
| 162 |
+
- rocBLAS
|
| 163 |
+
- ROCdbgapi
|
| 164 |
+
- ROCdebug-agent
|
| 165 |
+
- rocFFT
|
| 166 |
+
- ROCgdb
|
| 167 |
+
- ROCK
|
| 168 |
+
- ROCm Documentation/Website
|
| 169 |
+
- ROCm Data Center Tool
|
| 170 |
+
- ROCm Examples
|
| 171 |
+
- ROCm for Windows
|
| 172 |
+
- ROCm Performance Primitives
|
| 173 |
+
- ROCm System Management Interface Library
|
| 174 |
+
- ROCm Thrust
|
| 175 |
+
- ROCm Validation Suite
|
| 176 |
+
- rocm_bandwidth_test
|
| 177 |
+
- rocm-cmake
|
| 178 |
+
- rocm-core
|
| 179 |
+
- rocm-docs-core
|
| 180 |
+
- rocminfo
|
| 181 |
+
- rocMLIR
|
| 182 |
+
- rocmtools
|
| 183 |
+
- rocPRIM
|
| 184 |
+
- rocprofiler
|
| 185 |
+
- rocRAND
|
| 186 |
+
- ROCR-Runtime
|
| 187 |
+
- rocSOLVER
|
| 188 |
+
- rocSPARSE
|
| 189 |
+
- roctracer
|
| 190 |
+
- ROCT-Thunk-Interface
|
| 191 |
+
- rocWMMA
|
| 192 |
+
- Tensile
|
| 193 |
+
- umr
|
| 194 |
+
- ibv_rc_pingpong-amd
|
| 195 |
+
- mellanox
|
| 196 |
+
- mpitest
|
| 197 |
+
- Pytorch
|
| 198 |
+
- Tensorflow
|
| 199 |
+
- APEX
|
| 200 |
+
- torchvision
|
| 201 |
+
- Magma
|
| 202 |
+
- type: textarea
|
| 203 |
+
attributes:
|
| 204 |
+
label: Steps to Reproduce
|
| 205 |
+
description: (Optional) Detailed steps to reproduce the issue.
|
| 206 |
+
validations:
|
| 207 |
+
required: false
|
| 208 |
+
|
| 209 |
+
- type: textarea
|
| 210 |
+
attributes:
|
| 211 |
+
label: (Optional for Linux users) Output of /opt/rocm/bin/rocminfo --support
|
| 212 |
+
description: The output of rocminfo --support could help to better address the problem.
|
| 213 |
+
validations:
|
| 214 |
+
required: false
|
| 215 |
+
|
| 216 |
+
- type: textarea
|
| 217 |
+
attributes:
|
| 218 |
+
label: Additional Information
|
| 219 |
+
description: (Optional) Any additional information that is relevant, e.g. relevant environment variables, dockerfiles, log files, dmesg output (on Linux), etc.
|
| 220 |
+
validations:
|
| 221 |
+
required: false
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/conceptual/Composable-Kernel-math.rst
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel mathematical basis
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, mathematics, algorithm
|
| 4 |
+
|
| 5 |
+
.. _supported-primitives:
|
| 6 |
+
|
| 7 |
+
********************************************************************
|
| 8 |
+
Composable Kernel mathematical basis
|
| 9 |
+
********************************************************************
|
| 10 |
+
|
| 11 |
+
This is an introduction to the math which underpins the algorithms implemented in Composable Kernel.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
For vectors :math:`x^{(1)}, x^{(2)}, \ldots, x^{(T)}` of size :math:`B` you can decompose the
|
| 15 |
+
softmax of concatenated :math:`x = [ x^{(1)}\ | \ \ldots \ | \ x^{(T)} ]` as,
|
| 16 |
+
|
| 17 |
+
.. math::
|
| 18 |
+
:nowrap:
|
| 19 |
+
|
| 20 |
+
\begin{align}
|
| 21 |
+
m(x) & = m( [ x^{(1)}\ | \ \ldots \ | \ x^{(T)} ] ) = \max( m(x^{(1)}),\ldots, m(x^{(T)}) ) \\
|
| 22 |
+
f(x) & = [\exp( m(x^{(1)}) - m(x) ) f( x^{(1)} )\ | \ \ldots \ | \ \exp( m(x^{(T)}) - m(x) ) f( x^{(T)} )] \\
|
| 23 |
+
z(x) & = \exp( m(x^{(1)}) - m(x) )\ z(x^{(1)}) + \ldots + \exp( m(x^{(T)}) - m(x) )\ z(x^{(T)}) \\
|
| 24 |
+
\operatorname{softmax}(x) &= f(x)\ / \ z(x)
|
| 25 |
+
\end{align}
|
| 26 |
+
|
| 27 |
+
where :math:`f(x^{(j)}) = \exp( x^{(j)} - m(x^{(j)}) )` is of size :math:`B` and
|
| 28 |
+
:math:`z(x^{(j)}) = f(x_1^{(j)})+ \ldots+ f(x_B^{(j)})` is a scalar.
|
| 29 |
+
|
| 30 |
+
For a matrix :math:`X` composed of :math:`T_r \times T_c` tiles, :math:`X_{ij}`, of size
|
| 31 |
+
:math:`B_r \times B_c` you can compute the row-wise softmax as follows.
|
| 32 |
+
|
| 33 |
+
For :math:`j` from :math:`1` to :math:`T_c`, and :math:`i` from :math:`1` to :math:`T_r` calculate,
|
| 34 |
+
|
| 35 |
+
.. math::
|
| 36 |
+
:nowrap:
|
| 37 |
+
|
| 38 |
+
\begin{align}
|
| 39 |
+
\tilde{m}_{ij} &= \operatorname{rowmax}( X_{ij} ) \\
|
| 40 |
+
\tilde{P}_{ij} &= \exp(X_{ij} - \tilde{m}_{ij} ) \\
|
| 41 |
+
\tilde{z}_{ij} &= \operatorname{rowsum}( \tilde{P}_{ij} ) \\
|
| 42 |
+
\end{align}
|
| 43 |
+
|
| 44 |
+
If :math:`j=1`, initialize running max, running sum, and the first column block of the output,
|
| 45 |
+
|
| 46 |
+
.. math::
|
| 47 |
+
:nowrap:
|
| 48 |
+
|
| 49 |
+
\begin{align}
|
| 50 |
+
m_i &= \tilde{m}_{i1} \\
|
| 51 |
+
z_i &= \tilde{z}_{i1} \\
|
| 52 |
+
\tilde{Y}_{i1} &= \diag(\tilde{z}_{i1})^{-1} \tilde{P}_{i1}
|
| 53 |
+
\end{align}
|
| 54 |
+
|
| 55 |
+
Else if :math:`j>1`,
|
| 56 |
+
|
| 57 |
+
1. Update running max, running sum and column blocks :math:`k=1` to :math:`k=j-1`
|
| 58 |
+
|
| 59 |
+
.. math::
|
| 60 |
+
:nowrap:
|
| 61 |
+
|
| 62 |
+
\begin{align}
|
| 63 |
+
m^{new}_i &= \max(m_i, \tilde{m}_{ij} ) \\
|
| 64 |
+
z^{new}_i &= \exp(m_i - m^{new}_i)\ z_i + \exp( \tilde{m}_{ij} - m^{new}_i )\ \tilde{z}_{ij} \\
|
| 65 |
+
Y_{ik} &= \diag(z^{new}_{i})^{-1} \diag(z_{i}) \exp(m_i - m^{new}_i)\ Y_{ik}
|
| 66 |
+
\end{align}
|
| 67 |
+
|
| 68 |
+
2. Initialize column block :math:`j` of output and reset running max and running sum variables:
|
| 69 |
+
|
| 70 |
+
.. math::
|
| 71 |
+
:nowrap:
|
| 72 |
+
|
| 73 |
+
\begin{align}
|
| 74 |
+
\tilde{Y}_{ij} &= \diag(z^{new}_{i})^{-1} \exp(\tilde{m}_{ij} - m^{new}_i ) \tilde{P}_{ij} \\
|
| 75 |
+
z_i &= z^{new}_i \\
|
| 76 |
+
m_i &= m^{new}_i \\
|
| 77 |
+
\end{align}
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/conceptual/Composable-Kernel-structure.rst
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel structure
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, structure
|
| 4 |
+
|
| 5 |
+
.. _what-is-ck:
|
| 6 |
+
|
| 7 |
+
********************************************************************
|
| 8 |
+
Composable Kernel structure
|
| 9 |
+
********************************************************************
|
| 10 |
+
|
| 11 |
+
The Composable Kernel library uses a tile-based programming model and tensor coordinate transformation to achieve performance portability and code maintainability. Tensor coordinate transformation is a complexity reduction technique for complex machine learning operators.
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
.. image:: ../data/ck_component.png
|
| 15 |
+
:alt: CK Components
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
The Composable Kernel library consists of four layers:
|
| 19 |
+
|
| 20 |
+
* a templated tile operator layer
|
| 21 |
+
* a templated kernel and invoker layer
|
| 22 |
+
* an instantiated kernel and invoker layer
|
| 23 |
+
* a client API layer.
|
| 24 |
+
|
| 25 |
+
A wrapper component is included to simplify tensor transform operations.
|
| 26 |
+
|
| 27 |
+
.. image:: ../data/ck_layer.png
|
| 28 |
+
:alt: CK Layers
|
| 29 |
+
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/doxygen/Doxyfile
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/install/Composable-Kernel-Docker.rst
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel docker files
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, docker
|
| 4 |
+
|
| 5 |
+
.. _docker-hub:
|
| 6 |
+
|
| 7 |
+
********************************************************************
|
| 8 |
+
Composable Kernel Docker containers
|
| 9 |
+
********************************************************************
|
| 10 |
+
|
| 11 |
+
Docker images that include all the required prerequisites for building Composable Kernel are available on `Docker Hub <https://hub.docker.com/r/rocm/composable_kernel/tags>`_.
|
| 12 |
+
|
| 13 |
+
The images also contain `ROCm <https://rocm.docs.amd.com/en/latest/index.html>`_, `CMake <https://cmake.org/getting-started/>`_, and the `ROCm LLVM compiler infrastructure <https://rocm.docs.amd.com/projects/llvm-project/en/latest/index.html>`_.
|
| 14 |
+
|
| 15 |
+
Composable Kernel Docker images are named according to their operating system and ROCm version. For example, a Docker image named ``ck_ub22.04_rocm6.3`` would correspond to an Ubuntu 22.04 image with ROCm 6.3.
|
| 16 |
+
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/install/Composable-Kernel-install.rst
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel build and install
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, documentation, install
|
| 4 |
+
|
| 5 |
+
******************************************************
|
| 6 |
+
Building and installing Composable Kernel with CMake
|
| 7 |
+
******************************************************
|
| 8 |
+
|
| 9 |
+
Before you begin, clone the `Composable Kernel GitHub repository <https://github.com/ROCm/composable_kernel.git>`_ and create a ``build`` directory in its root:
|
| 10 |
+
|
| 11 |
+
.. code:: shell
|
| 12 |
+
|
| 13 |
+
git clone https://github.com/ROCm/composable_kernel.git
|
| 14 |
+
cd composable_kernel
|
| 15 |
+
mkdir build
|
| 16 |
+
|
| 17 |
+
Change directory to the ``build`` directory and generate the makefile using the ``cmake`` command. Two build options are required:
|
| 18 |
+
|
| 19 |
+
* ``CMAKE_PREFIX_PATH``: The ROCm installation path. ROCm is installed in ``/opt/rocm`` by default.
|
| 20 |
+
* ``CMAKE_CXX_COMPILER``: The path to the Clang compiler. Clang is found at ``/opt/rocm/llvm/bin/clang++`` by default.
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
.. code:: shell
|
| 24 |
+
|
| 25 |
+
cd build
|
| 26 |
+
cmake ../. -D CMAKE_PREFIX_PATH="/opt/rocm" -D CMAKE_CXX_COMPILER="/opt/rocm/llvm/bin/clang++" [-D<OPTION1=VALUE1> [-D<OPTION2=VALUE2>] ...]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
Other build options are:
|
| 30 |
+
|
| 31 |
+
* ``DISABLE_DL_KERNELS``: Set this to "ON" to not build deep learning (DL) and data parallel primitive (DPP) instances.
|
| 32 |
+
|
| 33 |
+
.. note::
|
| 34 |
+
|
| 35 |
+
DL and DPP instances are useful on architectures that don't support XDL or WMMA.
|
| 36 |
+
|
| 37 |
+
* ``CK_USE_FP8_ON_UNSUPPORTED_ARCH``: Set to ``ON`` to build FP8 data type instances on gfx90a without native FP8 support.
|
| 38 |
+
* ``GPU_TARGETS``: Target architectures. Target architectures in this list must all be different versions of the same architectures. Enclose the list of targets in quotation marks. Separate multiple targets with semicolons (``;``). For example, ``cmake -D GPU_TARGETS="gfx908;gfx90a"``. This option is required to build tests and examples.
|
| 39 |
+
* ``GPU_ARCHS``: Target architectures. Target architectures in this list are not limited to different versions of the same architectures. Enclose the list of targets in quotation marks. Separate multiple targets with semicolons (``;``). For example, ``cmake -D GPU_ARCHS="gfx908;gfx1100"``.
|
| 40 |
+
* ``CMAKE_BUILD_TYPE``: The build type. Can be ``None``, ``Release``, ``Debug``, ``RelWithDebInfo``, or ``MinSizeRel``. CMake will use ``Release`` by default.
|
| 41 |
+
|
| 42 |
+
.. Note::
|
| 43 |
+
|
| 44 |
+
If neither ``GPU_TARGETS`` nor ``GPU_ARCHS`` is specified, Composable Kernel will be built for all targets supported by the compiler.
|
| 45 |
+
|
| 46 |
+
Build Composable Kernel using the generated makefile. This will build the library, the examples, and the tests, and save them to ``bin``.
|
| 47 |
+
|
| 48 |
+
.. code:: shell
|
| 49 |
+
|
| 50 |
+
make -j20
|
| 51 |
+
|
| 52 |
+
The ``-j`` option speeds up the build by using multiple threads in parallel. For example, ``-j20`` uses twenty threads in parallel. On average, each thread will use 2GB of memory. Make sure that the number of threads you use doesn't exceed the available memory in your system.
|
| 53 |
+
|
| 54 |
+
Using ``-j`` alone will launch an unlimited number of threads and is not recommended.
|
| 55 |
+
|
| 56 |
+
Install the Composable Kernel library:
|
| 57 |
+
|
| 58 |
+
.. code:: shell
|
| 59 |
+
|
| 60 |
+
make install
|
| 61 |
+
|
| 62 |
+
After running ``make install``, the Composable Kernel files will be saved to the following locations:
|
| 63 |
+
|
| 64 |
+
* Library files: ``/opt/rocm/lib/``
|
| 65 |
+
* Header files: ``/opt/rocm/include/ck/`` and ``/opt/rocm/include/ck_tile/``
|
| 66 |
+
* Examples, tests, and ckProfiler: ``/opt/rocm/bin/``
|
| 67 |
+
|
| 68 |
+
For information about ckProfiler, see `the ckProfiler readme file <https://github.com/ROCm/composable_kernel/blob/develop/profiler/README.md>`_.
|
| 69 |
+
|
| 70 |
+
For information about running the examples and tests, see :doc:`Composable Kernel examples and tests <../tutorial/Composable-Kernel-examples>`.
|
| 71 |
+
|
| 72 |
+
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/install/Composable-Kernel-prerequisites.rst
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel prerequisites
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, documentation, prerequisites
|
| 4 |
+
|
| 5 |
+
******************************************************
|
| 6 |
+
Composable Kernel prerequisites
|
| 7 |
+
******************************************************
|
| 8 |
+
|
| 9 |
+
Docker images that include all the required prerequisites for building Composable Kernel are available on `Docker Hub <https://hub.docker.com/r/rocm/composable_kernel/tags>`_.
|
| 10 |
+
|
| 11 |
+
The following prerequisites are required to build and install Composable Kernel:
|
| 12 |
+
|
| 13 |
+
* cmake
|
| 14 |
+
* hip-rocclr
|
| 15 |
+
* iputils-ping
|
| 16 |
+
* jq
|
| 17 |
+
* libelf-dev
|
| 18 |
+
* libncurses5-dev
|
| 19 |
+
* libnuma-dev
|
| 20 |
+
* libpthread-stubs0-dev
|
| 21 |
+
* llvm-amdgpu
|
| 22 |
+
* mpich
|
| 23 |
+
* net-tools
|
| 24 |
+
* python3
|
| 25 |
+
* python3-dev
|
| 26 |
+
* python3-pip
|
| 27 |
+
* redis
|
| 28 |
+
* rocm-llvm-dev
|
| 29 |
+
* zlib1g-dev
|
| 30 |
+
* libzstd-dev
|
| 31 |
+
* openssh-server
|
| 32 |
+
* clang-format-12
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable-Kernel-wrapper.rst
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel wrapper
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, wrapper
|
| 4 |
+
|
| 5 |
+
.. _wrapper:
|
| 6 |
+
|
| 7 |
+
********************************************************************
|
| 8 |
+
Composable Kernel wrapper
|
| 9 |
+
********************************************************************
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
The Composable Kernel library provides a lightweight wrapper to simplify the more complex operations.
|
| 13 |
+
|
| 14 |
+
Example:
|
| 15 |
+
|
| 16 |
+
.. code-block:: c
|
| 17 |
+
|
| 18 |
+
const auto shape_4x2x4 = ck::make_tuple(4, ck::make_tuple(2, 4));
|
| 19 |
+
const auto strides_s2x1x8 = ck::make_tuple(2, ck::make_tuple(1, 8));
|
| 20 |
+
const auto layout = ck::wrapper::make_layout(shape_4x2x4, strides_s2x1x8);
|
| 21 |
+
|
| 22 |
+
std::array<ck::index_t, 32> data;
|
| 23 |
+
auto tensor = ck::wrapper::make_tensor<ck::wrapper::MemoryTypeEnum::Generic>(&data[0], layout);
|
| 24 |
+
|
| 25 |
+
for(ck::index_t w = 0; w < size(tensor); w++) {
|
| 26 |
+
tensor(w) = w;
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
// slice() == slice(0, -1) (whole dimension)
|
| 30 |
+
auto tensor_slice = tensor(ck::wrapper::slice(1, 3), ck::make_tuple(ck::wrapper::slice(), ck::wrapper::slice()));
|
| 31 |
+
std::cout << "dims:2,(2,4) strides:2,(1,8)" << std::endl;
|
| 32 |
+
for(ck::index_t h = 0; h < ck::wrapper::size<0>(tensor_slice); h++)
|
| 33 |
+
{
|
| 34 |
+
for(ck::index_t w = 0; w < ck::wrapper::size<1>(tensor_slice); w++)
|
| 35 |
+
{
|
| 36 |
+
std::cout << tensor_slice(h, w) << " ";
|
| 37 |
+
}
|
| 38 |
+
std::cout << std::endl;
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
Output::
|
| 42 |
+
|
| 43 |
+
dims:2,(2,4) strides:2,(1,8)
|
| 44 |
+
1 5 9 13 17 21 25 29
|
| 45 |
+
2 6 10 14 18 22 26 30
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
Tutorials:
|
| 49 |
+
|
| 50 |
+
* `GEMM tutorial <https://github.com/ROCm/composable_kernel/blob/develop/client_example/25_wrapper/README.md>`_
|
| 51 |
+
|
| 52 |
+
Advanced examples:
|
| 53 |
+
|
| 54 |
+
* `Image to column <https://github.com/ROCm/composable_kernel/blob/develop/client_example/25_wrapper/wrapper_img2col.cpp>`_
|
| 55 |
+
* `Basic gemm <https://github.com/ROCm/composable_kernel/blob/develop/client_example/25_wrapper/wrapper_basic_gemm.cpp>`_
|
| 56 |
+
* `Optimized gemm <https://github.com/ROCm/composable_kernel/blob/develop/client_example/25_wrapper/wrapper_optimized_gemm.cpp>`_
|
| 57 |
+
|
| 58 |
+
-------------------------------------
|
| 59 |
+
Layout
|
| 60 |
+
-------------------------------------
|
| 61 |
+
|
| 62 |
+
.. doxygenstruct:: Layout
|
| 63 |
+
|
| 64 |
+
-------------------------------------
|
| 65 |
+
Layout helpers
|
| 66 |
+
-------------------------------------
|
| 67 |
+
|
| 68 |
+
.. doxygenfile:: include/ck/wrapper/utils/layout_utils.hpp
|
| 69 |
+
|
| 70 |
+
-------------------------------------
|
| 71 |
+
Tensor
|
| 72 |
+
-------------------------------------
|
| 73 |
+
|
| 74 |
+
.. doxygenstruct:: Tensor
|
| 75 |
+
|
| 76 |
+
-------------------------------------
|
| 77 |
+
Tensor helpers
|
| 78 |
+
-------------------------------------
|
| 79 |
+
|
| 80 |
+
.. doxygenfile:: include/ck/wrapper/utils/tensor_utils.hpp
|
| 81 |
+
|
| 82 |
+
.. doxygenfile:: include/ck/wrapper/utils/tensor_partition.hpp
|
| 83 |
+
|
| 84 |
+
-------------------------------------
|
| 85 |
+
Operations
|
| 86 |
+
-------------------------------------
|
| 87 |
+
|
| 88 |
+
.. doxygenfile:: include/ck/wrapper/operations/copy.hpp
|
| 89 |
+
.. doxygenfile:: include/ck/wrapper/operations/gemm.hpp
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable_Kernel_custom_types.rst
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel supported custom types
|
| 3 |
+
:keywords: composable kernel, custom, data types, support, CK, ROCm
|
| 4 |
+
|
| 5 |
+
******************************************************
|
| 6 |
+
Composable Kernel custom data types
|
| 7 |
+
******************************************************
|
| 8 |
+
|
| 9 |
+
Composable Kernel supports the use of custom types that provide a way to implement specialized numerical formats.
|
| 10 |
+
|
| 11 |
+
To use custom types, a C++ type that implements the necessary operations for tensor computations needs to be created. These should include:
|
| 12 |
+
|
| 13 |
+
* Constructors and initialization methods
|
| 14 |
+
* Arithmetic operators if the type will be used in computational operations
|
| 15 |
+
* Any conversion functions needed to interface with other parts of an application
|
| 16 |
+
|
| 17 |
+
For example, to create a complex half-precision type:
|
| 18 |
+
|
| 19 |
+
.. code:: cpp
|
| 20 |
+
|
| 21 |
+
struct complex_half_t
|
| 22 |
+
{
|
| 23 |
+
half_t real;
|
| 24 |
+
half_t img;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
struct complex_half_t
|
| 28 |
+
{
|
| 29 |
+
using type = half_t;
|
| 30 |
+
type real;
|
| 31 |
+
type img;
|
| 32 |
+
|
| 33 |
+
complex_half_t() : real{type{}}, img{type{}} {}
|
| 34 |
+
complex_half_t(type real_init, type img_init) : real{real_init}, img{img_init} {}
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
Custom types can be particularly useful for specialized applications such as complex number arithmetic,
|
| 38 |
+
custom quantization schemes, or domain-specific number representations.
|
| 39 |
+
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable_Kernel_supported_scalar_types.rst
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel supported scalar types
|
| 3 |
+
:keywords: composable kernel, scalar, data types, support, CK, ROCm
|
| 4 |
+
|
| 5 |
+
***************************************************
|
| 6 |
+
Composable Kernel supported scalar data types
|
| 7 |
+
***************************************************
|
| 8 |
+
|
| 9 |
+
The Composable Kernel library provides support for the following scalar data types:
|
| 10 |
+
|
| 11 |
+
.. list-table::
|
| 12 |
+
:header-rows: 1
|
| 13 |
+
:widths: 25 15 60
|
| 14 |
+
|
| 15 |
+
* - Type
|
| 16 |
+
- Bit Width
|
| 17 |
+
- Description
|
| 18 |
+
|
| 19 |
+
* - ``double``
|
| 20 |
+
- 64-bit
|
| 21 |
+
- Standard IEEE 754 double precision floating point
|
| 22 |
+
|
| 23 |
+
* - ``float``
|
| 24 |
+
- 32-bit
|
| 25 |
+
- Standard IEEE 754 single precision floating point
|
| 26 |
+
|
| 27 |
+
* - ``int32_t``
|
| 28 |
+
- 32-bit
|
| 29 |
+
- Standard signed 32-bit integer
|
| 30 |
+
|
| 31 |
+
* - ``int8_t``
|
| 32 |
+
- 8-bit
|
| 33 |
+
- Standard signed 8-bit integer
|
| 34 |
+
|
| 35 |
+
* - ``uint8_t``
|
| 36 |
+
- 8-bit
|
| 37 |
+
- Standard unsigned 8-bit integer
|
| 38 |
+
|
| 39 |
+
* - ``bool``
|
| 40 |
+
- 1-bit
|
| 41 |
+
- Boolean type
|
| 42 |
+
|
| 43 |
+
* - ``ck::half_t``
|
| 44 |
+
- 16-bit
|
| 45 |
+
- IEEE 754 half precision floating point with 5 exponent bits, 10 mantissa bits, and 1 sign bit
|
| 46 |
+
|
| 47 |
+
* - ``ck::bhalf_t``
|
| 48 |
+
- 16-bit
|
| 49 |
+
- Brain floating point with 8 exponent bits, 7 mantissa bits, and 1 sign bit
|
| 50 |
+
|
| 51 |
+
* - ``ck::f8_t``
|
| 52 |
+
- 8-bit
|
| 53 |
+
- 8-bit floating point (E4M3 format) with 4 exponent bits, 3 mantissa bits, and 1 sign bit
|
| 54 |
+
|
| 55 |
+
* - ``ck::bf8_t``
|
| 56 |
+
- 8-bit
|
| 57 |
+
- 8-bit brain floating point (E5M2 format) with 5 exponent bits, 2 mantissa bits, and 1 sign bit
|
| 58 |
+
|
| 59 |
+
* - ``ck::f4_t``
|
| 60 |
+
- 4-bit
|
| 61 |
+
- 4-bit floating point format (E2M1 format) with 2 exponent bits, 1 mantissa bit, and 1 sign bit
|
| 62 |
+
|
| 63 |
+
* - ``ck::f6_t``
|
| 64 |
+
- 6-bit
|
| 65 |
+
- 6-bit floating point format (E2M3 format) with 2 exponent bits, 3 mantissa bits, and 1 sign bit
|
| 66 |
+
|
| 67 |
+
* - ``ck::bf6_t``
|
| 68 |
+
- 6-bit
|
| 69 |
+
- 6-bit brain floating point format (E3M2 format) with 3 exponent bits, 2 mantissa bits, and 1 sign bit
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/reference/Composable_Kernel_vector_utilities.rst
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel supported precision types and custom type support
|
| 3 |
+
:keywords: composable kernel, precision, data types, ROCm
|
| 4 |
+
|
| 5 |
+
******************************************************
|
| 6 |
+
Composable Kernel vector template utilities
|
| 7 |
+
******************************************************
|
| 8 |
+
|
| 9 |
+
Composable Kernel includes template utilities for creating vector types with customizable widths. These template utilities also flatten nested vector types into a single, wider vector, preventing the creation of vectors of vectors.
|
| 10 |
+
|
| 11 |
+
Vectors composed of supported scalar and custom types can be created with the ``ck::vector_type`` template.
|
| 12 |
+
|
| 13 |
+
For example, ``ck::vector_type<float, 4>`` creates a vector composed of four floats and ``ck::vector_type<ck::half_t, 8>`` creates a vector composed of eight half-precision scalars.
|
| 14 |
+
|
| 15 |
+
For vector operations to be valid, the underlying types must be either a :doc:`supported scalar type <Composable_Kernel_supported_scalar_types>` or :doc:`a custom type <Composable_Kernel_custom_types>` that implements the required operations.
|
| 16 |
+
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/sphinx/_toc.yml.in
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
defaults:
|
| 2 |
+
numbered: False
|
| 3 |
+
root: index
|
| 4 |
+
subtrees:
|
| 5 |
+
|
| 6 |
+
- caption: Install
|
| 7 |
+
entries:
|
| 8 |
+
- file: install/Composable-Kernel-prerequisites.rst
|
| 9 |
+
title: Composable Kernel prerequisites
|
| 10 |
+
- file: install/Composable-Kernel-install.rst
|
| 11 |
+
title: Build and install Composable Kernel
|
| 12 |
+
- file: install/Composable-Kernel-Docker.rst
|
| 13 |
+
title: Composable Kernel Docker images
|
| 14 |
+
|
| 15 |
+
- caption: Conceptual
|
| 16 |
+
entries:
|
| 17 |
+
- file: conceptual/Composable-Kernel-structure.rst
|
| 18 |
+
title: Composable Kernel structure
|
| 19 |
+
- file: conceptual/Composable-Kernel-math.rst
|
| 20 |
+
title: Composable Kernel mathematical basis
|
| 21 |
+
|
| 22 |
+
- caption: Tutorial
|
| 23 |
+
entries:
|
| 24 |
+
- file: tutorial/Composable-Kernel-examples.rst
|
| 25 |
+
title: Composable Kernel examples
|
| 26 |
+
|
| 27 |
+
- caption: Reference
|
| 28 |
+
entries:
|
| 29 |
+
- file: reference/Composable_Kernel_supported_scalar_types.rst
|
| 30 |
+
title: Composable Kernel scalar types
|
| 31 |
+
- file: reference/Composable_Kernel_custom_types.rst
|
| 32 |
+
title: Composable Kernel custom types
|
| 33 |
+
- file: reference/Composable_Kernel_vector_utilities.rst
|
| 34 |
+
title: Composable Kernel vector utilities
|
| 35 |
+
- file: reference/Composable-Kernel-wrapper.rst
|
| 36 |
+
title: Composable Kernel wrapper
|
| 37 |
+
- file: doxygen/html/annotated.rst
|
| 38 |
+
title: Composable Kernel class list
|
| 39 |
+
|
| 40 |
+
- caption: About
|
| 41 |
+
entries:
|
| 42 |
+
- file: Contributors_Guide.rst
|
| 43 |
+
title: Contributing to Composable Kernel
|
| 44 |
+
- file: license.rst
|
| 45 |
+
title: License
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/sphinx/requirements.in
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
rocm-docs-core[api_reference]==1.20.1
|
| 2 |
+
sphinxcontrib-bibtex==2.6.4
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/sphinx/requirements.txt
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# This file is autogenerated by pip-compile with Python 3.10
|
| 3 |
+
# by the following command:
|
| 4 |
+
#
|
| 5 |
+
# pip-compile requirements.in
|
| 6 |
+
#
|
| 7 |
+
accessible-pygments==0.0.5
|
| 8 |
+
# via pydata-sphinx-theme
|
| 9 |
+
alabaster==1.0.0
|
| 10 |
+
# via sphinx
|
| 11 |
+
asttokens==3.0.0
|
| 12 |
+
# via stack-data
|
| 13 |
+
attrs==25.3.0
|
| 14 |
+
# via
|
| 15 |
+
# jsonschema
|
| 16 |
+
# jupyter-cache
|
| 17 |
+
# referencing
|
| 18 |
+
babel==2.17.0
|
| 19 |
+
# via
|
| 20 |
+
# pydata-sphinx-theme
|
| 21 |
+
# sphinx
|
| 22 |
+
beautifulsoup4==4.13.4
|
| 23 |
+
# via pydata-sphinx-theme
|
| 24 |
+
breathe==4.36.0
|
| 25 |
+
# via rocm-docs-core
|
| 26 |
+
certifi==2025.1.31
|
| 27 |
+
# via requests
|
| 28 |
+
cffi==1.17.1
|
| 29 |
+
# via
|
| 30 |
+
# cryptography
|
| 31 |
+
# pynacl
|
| 32 |
+
charset-normalizer==3.4.1
|
| 33 |
+
# via requests
|
| 34 |
+
click==8.1.8
|
| 35 |
+
# via
|
| 36 |
+
# click-log
|
| 37 |
+
# doxysphinx
|
| 38 |
+
# jupyter-cache
|
| 39 |
+
# sphinx-external-toc
|
| 40 |
+
click-log==0.4.0
|
| 41 |
+
# via doxysphinx
|
| 42 |
+
comm==0.2.2
|
| 43 |
+
# via ipykernel
|
| 44 |
+
contourpy==1.3.2
|
| 45 |
+
# via matplotlib
|
| 46 |
+
cryptography==44.0.2
|
| 47 |
+
# via pyjwt
|
| 48 |
+
cycler==0.12.1
|
| 49 |
+
# via matplotlib
|
| 50 |
+
debugpy==1.8.14
|
| 51 |
+
# via ipykernel
|
| 52 |
+
decorator==5.2.1
|
| 53 |
+
# via ipython
|
| 54 |
+
deprecated==1.2.18
|
| 55 |
+
# via pygithub
|
| 56 |
+
docutils==0.21.2
|
| 57 |
+
# via
|
| 58 |
+
# myst-parser
|
| 59 |
+
# pybtex-docutils
|
| 60 |
+
# pydata-sphinx-theme
|
| 61 |
+
# sphinx
|
| 62 |
+
# sphinxcontrib-bibtex
|
| 63 |
+
doxysphinx==3.3.12
|
| 64 |
+
# via rocm-docs-core
|
| 65 |
+
exceptiongroup==1.2.2
|
| 66 |
+
# via ipython
|
| 67 |
+
executing==2.2.0
|
| 68 |
+
# via stack-data
|
| 69 |
+
fastjsonschema==2.21.1
|
| 70 |
+
# via
|
| 71 |
+
# nbformat
|
| 72 |
+
# rocm-docs-core
|
| 73 |
+
fonttools==4.57.0
|
| 74 |
+
# via matplotlib
|
| 75 |
+
gitdb==4.0.12
|
| 76 |
+
# via gitpython
|
| 77 |
+
gitpython==3.1.44
|
| 78 |
+
# via rocm-docs-core
|
| 79 |
+
greenlet==3.2.1
|
| 80 |
+
# via sqlalchemy
|
| 81 |
+
idna==3.10
|
| 82 |
+
# via requests
|
| 83 |
+
imagesize==1.4.1
|
| 84 |
+
# via sphinx
|
| 85 |
+
importlib-metadata==8.6.1
|
| 86 |
+
# via
|
| 87 |
+
# jupyter-cache
|
| 88 |
+
# myst-nb
|
| 89 |
+
ipykernel==6.29.5
|
| 90 |
+
# via myst-nb
|
| 91 |
+
ipython==8.35.0
|
| 92 |
+
# via
|
| 93 |
+
# ipykernel
|
| 94 |
+
# myst-nb
|
| 95 |
+
jedi==0.19.2
|
| 96 |
+
# via ipython
|
| 97 |
+
jinja2==3.1.6
|
| 98 |
+
# via
|
| 99 |
+
# myst-parser
|
| 100 |
+
# sphinx
|
| 101 |
+
jsonschema==4.23.0
|
| 102 |
+
# via nbformat
|
| 103 |
+
jsonschema-specifications==2024.10.1
|
| 104 |
+
# via jsonschema
|
| 105 |
+
jupyter-cache==1.0.1
|
| 106 |
+
# via myst-nb
|
| 107 |
+
jupyter-client==8.6.3
|
| 108 |
+
# via
|
| 109 |
+
# ipykernel
|
| 110 |
+
# nbclient
|
| 111 |
+
jupyter-core==5.7.2
|
| 112 |
+
# via
|
| 113 |
+
# ipykernel
|
| 114 |
+
# jupyter-client
|
| 115 |
+
# nbclient
|
| 116 |
+
# nbformat
|
| 117 |
+
kiwisolver==1.4.8
|
| 118 |
+
# via matplotlib
|
| 119 |
+
latexcodec==3.0.0
|
| 120 |
+
# via pybtex
|
| 121 |
+
libsass==0.22.0
|
| 122 |
+
# via doxysphinx
|
| 123 |
+
lxml==5.2.1
|
| 124 |
+
# via doxysphinx
|
| 125 |
+
markdown-it-py==3.0.0
|
| 126 |
+
# via
|
| 127 |
+
# mdit-py-plugins
|
| 128 |
+
# myst-parser
|
| 129 |
+
markupsafe==3.0.2
|
| 130 |
+
# via jinja2
|
| 131 |
+
matplotlib==3.10.1
|
| 132 |
+
# via doxysphinx
|
| 133 |
+
matplotlib-inline==0.1.7
|
| 134 |
+
# via
|
| 135 |
+
# ipykernel
|
| 136 |
+
# ipython
|
| 137 |
+
mdit-py-plugins==0.4.2
|
| 138 |
+
# via myst-parser
|
| 139 |
+
mdurl==0.1.2
|
| 140 |
+
# via markdown-it-py
|
| 141 |
+
mpire==2.10.2
|
| 142 |
+
# via doxysphinx
|
| 143 |
+
myst-nb==1.2.0
|
| 144 |
+
# via rocm-docs-core
|
| 145 |
+
myst-parser==4.0.1
|
| 146 |
+
# via myst-nb
|
| 147 |
+
nbclient==0.10.2
|
| 148 |
+
# via
|
| 149 |
+
# jupyter-cache
|
| 150 |
+
# myst-nb
|
| 151 |
+
nbformat==5.10.4
|
| 152 |
+
# via
|
| 153 |
+
# jupyter-cache
|
| 154 |
+
# myst-nb
|
| 155 |
+
# nbclient
|
| 156 |
+
nest-asyncio==1.6.0
|
| 157 |
+
# via ipykernel
|
| 158 |
+
numpy==1.26.4
|
| 159 |
+
# via
|
| 160 |
+
# contourpy
|
| 161 |
+
# doxysphinx
|
| 162 |
+
# matplotlib
|
| 163 |
+
packaging==25.0
|
| 164 |
+
# via
|
| 165 |
+
# ipykernel
|
| 166 |
+
# matplotlib
|
| 167 |
+
# pydata-sphinx-theme
|
| 168 |
+
# sphinx
|
| 169 |
+
parso==0.8.4
|
| 170 |
+
# via jedi
|
| 171 |
+
pexpect==4.9.0
|
| 172 |
+
# via ipython
|
| 173 |
+
pillow==11.2.1
|
| 174 |
+
# via matplotlib
|
| 175 |
+
platformdirs==4.3.7
|
| 176 |
+
# via jupyter-core
|
| 177 |
+
prompt-toolkit==3.0.51
|
| 178 |
+
# via ipython
|
| 179 |
+
psutil==7.0.0
|
| 180 |
+
# via ipykernel
|
| 181 |
+
ptyprocess==0.7.0
|
| 182 |
+
# via pexpect
|
| 183 |
+
pure-eval==0.2.3
|
| 184 |
+
# via stack-data
|
| 185 |
+
pybtex==0.24.0
|
| 186 |
+
# via
|
| 187 |
+
# pybtex-docutils
|
| 188 |
+
# sphinxcontrib-bibtex
|
| 189 |
+
pybtex-docutils==1.0.3
|
| 190 |
+
# via sphinxcontrib-bibtex
|
| 191 |
+
pycparser==2.22
|
| 192 |
+
# via cffi
|
| 193 |
+
pydata-sphinx-theme==0.15.4
|
| 194 |
+
# via
|
| 195 |
+
# rocm-docs-core
|
| 196 |
+
# sphinx-book-theme
|
| 197 |
+
pygithub==2.6.1
|
| 198 |
+
# via rocm-docs-core
|
| 199 |
+
pygments==2.19.1
|
| 200 |
+
# via
|
| 201 |
+
# accessible-pygments
|
| 202 |
+
# ipython
|
| 203 |
+
# mpire
|
| 204 |
+
# pydata-sphinx-theme
|
| 205 |
+
# sphinx
|
| 206 |
+
pyjson5==1.6.8
|
| 207 |
+
# via doxysphinx
|
| 208 |
+
pyjwt[crypto]==2.10.1
|
| 209 |
+
# via pygithub
|
| 210 |
+
pynacl==1.5.0
|
| 211 |
+
# via pygithub
|
| 212 |
+
pyparsing==3.2.3
|
| 213 |
+
# via
|
| 214 |
+
# doxysphinx
|
| 215 |
+
# matplotlib
|
| 216 |
+
python-dateutil==2.9.0.post0
|
| 217 |
+
# via
|
| 218 |
+
# jupyter-client
|
| 219 |
+
# matplotlib
|
| 220 |
+
pyyaml==6.0.2
|
| 221 |
+
# via
|
| 222 |
+
# jupyter-cache
|
| 223 |
+
# myst-nb
|
| 224 |
+
# myst-parser
|
| 225 |
+
# pybtex
|
| 226 |
+
# rocm-docs-core
|
| 227 |
+
# sphinx-external-toc
|
| 228 |
+
pyzmq==26.4.0
|
| 229 |
+
# via
|
| 230 |
+
# ipykernel
|
| 231 |
+
# jupyter-client
|
| 232 |
+
referencing==0.36.2
|
| 233 |
+
# via
|
| 234 |
+
# jsonschema
|
| 235 |
+
# jsonschema-specifications
|
| 236 |
+
requests==2.32.3
|
| 237 |
+
# via
|
| 238 |
+
# pygithub
|
| 239 |
+
# sphinx
|
| 240 |
+
rocm-docs-core[api-reference]==1.20.1
|
| 241 |
+
# via -r requirements.in
|
| 242 |
+
rpds-py==0.24.0
|
| 243 |
+
# via
|
| 244 |
+
# jsonschema
|
| 245 |
+
# referencing
|
| 246 |
+
six==1.17.0
|
| 247 |
+
# via
|
| 248 |
+
# pybtex
|
| 249 |
+
# python-dateutil
|
| 250 |
+
smmap==5.0.2
|
| 251 |
+
# via gitdb
|
| 252 |
+
snowballstemmer==2.2.0
|
| 253 |
+
# via sphinx
|
| 254 |
+
soupsieve==2.7
|
| 255 |
+
# via beautifulsoup4
|
| 256 |
+
sphinx==8.1.3
|
| 257 |
+
# via
|
| 258 |
+
# breathe
|
| 259 |
+
# myst-nb
|
| 260 |
+
# myst-parser
|
| 261 |
+
# pydata-sphinx-theme
|
| 262 |
+
# rocm-docs-core
|
| 263 |
+
# sphinx-book-theme
|
| 264 |
+
# sphinx-copybutton
|
| 265 |
+
# sphinx-design
|
| 266 |
+
# sphinx-external-toc
|
| 267 |
+
# sphinx-notfound-page
|
| 268 |
+
# sphinxcontrib-bibtex
|
| 269 |
+
sphinx-book-theme==1.1.4
|
| 270 |
+
# via rocm-docs-core
|
| 271 |
+
sphinx-copybutton==0.5.2
|
| 272 |
+
# via rocm-docs-core
|
| 273 |
+
sphinx-design==0.6.1
|
| 274 |
+
# via rocm-docs-core
|
| 275 |
+
sphinx-external-toc==1.0.1
|
| 276 |
+
# via rocm-docs-core
|
| 277 |
+
sphinx-notfound-page==1.1.0
|
| 278 |
+
# via rocm-docs-core
|
| 279 |
+
sphinxcontrib-applehelp==2.0.0
|
| 280 |
+
# via sphinx
|
| 281 |
+
sphinxcontrib-bibtex==2.6.4
|
| 282 |
+
# via -r requirements.in
|
| 283 |
+
sphinxcontrib-devhelp==2.0.0
|
| 284 |
+
# via sphinx
|
| 285 |
+
sphinxcontrib-htmlhelp==2.1.0
|
| 286 |
+
# via sphinx
|
| 287 |
+
sphinxcontrib-jsmath==1.0.1
|
| 288 |
+
# via sphinx
|
| 289 |
+
sphinxcontrib-qthelp==2.0.0
|
| 290 |
+
# via sphinx
|
| 291 |
+
sphinxcontrib-serializinghtml==2.0.0
|
| 292 |
+
# via sphinx
|
| 293 |
+
sqlalchemy==2.0.40
|
| 294 |
+
# via jupyter-cache
|
| 295 |
+
stack-data==0.6.3
|
| 296 |
+
# via ipython
|
| 297 |
+
tabulate==0.9.0
|
| 298 |
+
# via jupyter-cache
|
| 299 |
+
tomli==2.2.1
|
| 300 |
+
# via sphinx
|
| 301 |
+
tornado==6.4.2
|
| 302 |
+
# via
|
| 303 |
+
# ipykernel
|
| 304 |
+
# jupyter-client
|
| 305 |
+
tqdm==4.67.1
|
| 306 |
+
# via mpire
|
| 307 |
+
traitlets==5.14.3
|
| 308 |
+
# via
|
| 309 |
+
# comm
|
| 310 |
+
# ipykernel
|
| 311 |
+
# ipython
|
| 312 |
+
# jupyter-client
|
| 313 |
+
# jupyter-core
|
| 314 |
+
# matplotlib-inline
|
| 315 |
+
# nbclient
|
| 316 |
+
# nbformat
|
| 317 |
+
typing-extensions==4.13.2
|
| 318 |
+
# via
|
| 319 |
+
# beautifulsoup4
|
| 320 |
+
# ipython
|
| 321 |
+
# myst-nb
|
| 322 |
+
# pydata-sphinx-theme
|
| 323 |
+
# pygithub
|
| 324 |
+
# referencing
|
| 325 |
+
# sqlalchemy
|
| 326 |
+
urllib3==2.4.0
|
| 327 |
+
# via
|
| 328 |
+
# pygithub
|
| 329 |
+
# requests
|
| 330 |
+
wcwidth==0.2.13
|
| 331 |
+
# via prompt-toolkit
|
| 332 |
+
wrapt==1.17.2
|
| 333 |
+
# via deprecated
|
| 334 |
+
zipp==3.21.0
|
| 335 |
+
# via importlib-metadata
|
Code/Baselines/flash-attention/csrc/composable_kernel/docs/tutorial/Composable-Kernel-examples.rst
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.. meta::
|
| 2 |
+
:description: Composable Kernel examples and tests
|
| 3 |
+
:keywords: composable kernel, CK, ROCm, API, examples, tests
|
| 4 |
+
|
| 5 |
+
********************************************************************
|
| 6 |
+
Composable Kernel examples and tests
|
| 7 |
+
********************************************************************
|
| 8 |
+
|
| 9 |
+
After :doc:`building and installing Composable Kernel <../install/Composable-Kernel-install>`, the examples and tests will be moved to ``/opt/rocm/bin/``.
|
| 10 |
+
|
| 11 |
+
All tests have the prefix ``test`` and all examples have the prefix ``example``.
|
| 12 |
+
|
| 13 |
+
Use ``ctest`` with no arguments to run all examples and tests, or use ``ctest -R`` to run a single test. For example:
|
| 14 |
+
|
| 15 |
+
.. code:: shell
|
| 16 |
+
|
| 17 |
+
ctest -R test_gemm_fp16
|
| 18 |
+
|
| 19 |
+
Examples can be run individually as well. For example:
|
| 20 |
+
|
| 21 |
+
.. code:: shell
|
| 22 |
+
|
| 23 |
+
./bin/example_gemm_xdl_fp16 1 1 1
|
| 24 |
+
|
| 25 |
+
For instructions on how to run individual examples and tests, see their README files in the |example|_ and |test|_ GitHub folders.
|
| 26 |
+
|
| 27 |
+
To run smoke tests, use ``make smoke``.
|
| 28 |
+
|
| 29 |
+
To run regression tests, use ``make regression``.
|
| 30 |
+
|
| 31 |
+
In general, tests that run for under thirty seconds are included in the smoke tests and tests that run for over thirty seconds are included in the regression tests.
|
| 32 |
+
|
| 33 |
+
.. |example| replace:: ``example``
|
| 34 |
+
.. _example: https://github.com/ROCm/composable_kernel/tree/develop/example
|
| 35 |
+
|
| 36 |
+
.. |client_example| replace:: ``client_example``
|
| 37 |
+
.. _client_example: https://github.com/ROCm/composable_kernel/tree/develop/client_example
|
| 38 |
+
|
| 39 |
+
.. |test| replace:: ``test``
|
| 40 |
+
.. _test: https://github.com/ROCm/composable_kernel/tree/develop/test
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/data_type_enum.hpp
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
namespace ck {
|
| 7 |
+
|
| 8 |
+
enum struct DataTypeEnum
|
| 9 |
+
{
|
| 10 |
+
Half = 0,
|
| 11 |
+
Float = 1,
|
| 12 |
+
Int32 = 2,
|
| 13 |
+
Int8 = 3,
|
| 14 |
+
Int8x4 = 4,
|
| 15 |
+
BFloat16 = 5,
|
| 16 |
+
Double = 6,
|
| 17 |
+
Float8 = 7,
|
| 18 |
+
Unknown = 100,
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_avg_pool2d_bwd_impl.hpp
ADDED
|
@@ -0,0 +1,255 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/library/tensor_operation_instance/gpu/avg_pool2d_bwd.hpp"
|
| 10 |
+
#include "ck/library/utility/check_err.hpp"
|
| 11 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 12 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 13 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 14 |
+
#include "ck/library/utility/literals.hpp"
|
| 15 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_avgpool_bwd.hpp"
|
| 16 |
+
|
| 17 |
+
namespace ck {
|
| 18 |
+
namespace profiler {
|
| 19 |
+
|
| 20 |
+
template <typename TensorLayout>
|
| 21 |
+
std::vector<ck::index_t> f_tensor_strides_nchw(
|
| 22 |
+
ck::index_t N, ck::index_t C, ck::index_t H, ck::index_t W, TensorLayout layout)
|
| 23 |
+
{
|
| 24 |
+
using namespace ck::literals;
|
| 25 |
+
(void)N;
|
| 26 |
+
if constexpr(ck::is_same<decltype(layout), ck::tensor_layout::convolution::NHWC>::value)
|
| 27 |
+
return {C * H * W, 1_uz, W * C, C};
|
| 28 |
+
else
|
| 29 |
+
throw std::runtime_error("not supported yet");
|
| 30 |
+
};
|
| 31 |
+
|
| 32 |
+
template <typename DOutDataType, typename DInDataType, typename DOutLayout, typename DInLayout>
|
| 33 |
+
bool profile_avg_pool2d_bwd_impl(int do_verification,
|
| 34 |
+
int init_method,
|
| 35 |
+
bool do_log,
|
| 36 |
+
bool time_kernel,
|
| 37 |
+
std::vector<index_t> in_length,
|
| 38 |
+
std::vector<index_t> window_spatial_lengths,
|
| 39 |
+
std::vector<index_t> window_strides,
|
| 40 |
+
std::vector<index_t> window_dilations,
|
| 41 |
+
std::vector<index_t> input_left_pads,
|
| 42 |
+
std::vector<index_t> input_right_pads)
|
| 43 |
+
{
|
| 44 |
+
constexpr index_t InOutRank = 4;
|
| 45 |
+
constexpr index_t WindowRank = 2;
|
| 46 |
+
|
| 47 |
+
if(in_length.size() != InOutRank || window_spatial_lengths.size() != WindowRank ||
|
| 48 |
+
window_strides.size() != WindowRank || window_dilations.size() != WindowRank ||
|
| 49 |
+
input_left_pads.size() != WindowRank || input_right_pads.size() != WindowRank)
|
| 50 |
+
{
|
| 51 |
+
std::cout << "Parameter is incorrect" << std::endl;
|
| 52 |
+
return false;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
std::vector<index_t> out_length(InOutRank);
|
| 56 |
+
|
| 57 |
+
const int N = in_length[0];
|
| 58 |
+
const int C = in_length[1];
|
| 59 |
+
|
| 60 |
+
out_length[0] = N;
|
| 61 |
+
out_length[1] = C;
|
| 62 |
+
|
| 63 |
+
// Calculate Ho, Wo
|
| 64 |
+
for(unsigned i = 2; i < InOutRank; ++i)
|
| 65 |
+
{
|
| 66 |
+
const int idx = i - 2;
|
| 67 |
+
auto pad1 = input_left_pads[idx];
|
| 68 |
+
auto pad2 = input_right_pads[idx];
|
| 69 |
+
auto windows_size = window_spatial_lengths[idx];
|
| 70 |
+
auto windows_stride = window_strides[idx];
|
| 71 |
+
auto windows_dilation = window_dilations[idx];
|
| 72 |
+
auto eff = (windows_size - 1) * windows_dilation + 1;
|
| 73 |
+
out_length[i] = (in_length[i] + pad1 + pad2 - eff) / windows_stride + 1;
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
const int Hi = in_length[2];
|
| 77 |
+
const int Wi = in_length[3];
|
| 78 |
+
const int Ho = out_length[2];
|
| 79 |
+
const int Wo = out_length[3];
|
| 80 |
+
|
| 81 |
+
auto f_host_tensor_descriptor =
|
| 82 |
+
[](std::size_t N_, std::size_t C_, std::size_t H, std::size_t W) {
|
| 83 |
+
using namespace ck::literals;
|
| 84 |
+
|
| 85 |
+
return HostTensorDescriptor({N_, C_, H, W}, {C_ * H * W, 1_uz, W * C_, C_});
|
| 86 |
+
};
|
| 87 |
+
|
| 88 |
+
Tensor<DOutDataType> out_n_c_ho_wo_host(f_host_tensor_descriptor(N, C, Ho, Wo));
|
| 89 |
+
Tensor<DInDataType> in_n_c_hi_wi_device(f_host_tensor_descriptor(N, C, Hi, Wi));
|
| 90 |
+
Tensor<DInDataType> in_n_c_hi_wi_host(f_host_tensor_descriptor(N, C, Hi, Wi));
|
| 91 |
+
|
| 92 |
+
switch(init_method)
|
| 93 |
+
{
|
| 94 |
+
case 0: {
|
| 95 |
+
out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_1<DOutDataType>{});
|
| 96 |
+
break;
|
| 97 |
+
}
|
| 98 |
+
case 1: {
|
| 99 |
+
out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_2<DOutDataType>{-5, 5});
|
| 100 |
+
break;
|
| 101 |
+
}
|
| 102 |
+
default: {
|
| 103 |
+
out_n_c_ho_wo_host.GenerateTensorValue(GeneratorTensor_3<DOutDataType>{-0.5, 0.5});
|
| 104 |
+
}
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
DeviceMem dout_device_buf(sizeof(DOutDataType) *
|
| 108 |
+
out_n_c_ho_wo_host.mDesc.GetElementSpaceSize());
|
| 109 |
+
DeviceMem din_device_buf(sizeof(DInDataType) * in_n_c_hi_wi_device.mDesc.GetElementSpaceSize());
|
| 110 |
+
|
| 111 |
+
dout_device_buf.ToDevice(out_n_c_ho_wo_host.mData.data());
|
| 112 |
+
|
| 113 |
+
using DeviceOp = ck::tensor_operation::device::
|
| 114 |
+
DeviceAvgPoolBwd<2, DOutDataType, DInDataType, DOutLayout, DInLayout>;
|
| 115 |
+
|
| 116 |
+
// get device op instances
|
| 117 |
+
const auto instance_ptrs =
|
| 118 |
+
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 119 |
+
DeviceOp>::GetInstances();
|
| 120 |
+
|
| 121 |
+
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
|
| 122 |
+
|
| 123 |
+
std::string best_instance_name;
|
| 124 |
+
float best_avg_time = std::numeric_limits<float>::max();
|
| 125 |
+
float best_gb_per_sec = 0;
|
| 126 |
+
|
| 127 |
+
if(do_verification)
|
| 128 |
+
{
|
| 129 |
+
using ReferencePoolingBwdInstance =
|
| 130 |
+
ck::tensor_operation::host::ReferenceAvgPoolBwd<2, DInDataType, DOutDataType>;
|
| 131 |
+
|
| 132 |
+
ReferencePoolingBwdInstance ref_pooling_bwd;
|
| 133 |
+
auto ref_pooling_bwd_argument = ref_pooling_bwd.MakeArgument(in_n_c_hi_wi_host,
|
| 134 |
+
out_n_c_ho_wo_host,
|
| 135 |
+
window_spatial_lengths,
|
| 136 |
+
window_strides,
|
| 137 |
+
window_dilations,
|
| 138 |
+
input_left_pads,
|
| 139 |
+
input_right_pads);
|
| 140 |
+
|
| 141 |
+
auto ref_invoker = ref_pooling_bwd.MakeInvoker();
|
| 142 |
+
ref_invoker.Run(ref_pooling_bwd_argument);
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
int num_kernel = 0;
|
| 146 |
+
bool pass = true;
|
| 147 |
+
bool instance_found = false;
|
| 148 |
+
for(auto& inst_ptr : instance_ptrs)
|
| 149 |
+
{
|
| 150 |
+
auto argument_ptr = inst_ptr->MakeArgumentPointer(
|
| 151 |
+
static_cast<DOutDataType*>(dout_device_buf.GetDeviceBuffer()),
|
| 152 |
+
static_cast<DInDataType*>(din_device_buf.GetDeviceBuffer()),
|
| 153 |
+
{N, C, Ho, Wo},
|
| 154 |
+
{N, C, Hi, Wi},
|
| 155 |
+
f_tensor_strides_nchw(N, C, Ho, Wo, DOutLayout{}),
|
| 156 |
+
f_tensor_strides_nchw(N, C, Hi, Wi, DInLayout{}),
|
| 157 |
+
window_spatial_lengths,
|
| 158 |
+
window_strides,
|
| 159 |
+
window_dilations,
|
| 160 |
+
input_left_pads,
|
| 161 |
+
input_right_pads);
|
| 162 |
+
|
| 163 |
+
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 164 |
+
{
|
| 165 |
+
++num_kernel;
|
| 166 |
+
instance_found = true;
|
| 167 |
+
}
|
| 168 |
+
else
|
| 169 |
+
{
|
| 170 |
+
if(time_kernel)
|
| 171 |
+
{
|
| 172 |
+
std::cout << inst_ptr->GetTypeString() << " skipped due to unsupported argument: ";
|
| 173 |
+
LogRange(std::cout << "doutput lengths = ", out_length, ", ") << std::endl;
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
continue;
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
din_device_buf.SetZero();
|
| 180 |
+
|
| 181 |
+
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
|
| 182 |
+
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 183 |
+
|
| 184 |
+
std::size_t num_bytes = out_n_c_ho_wo_host.mDesc.GetElementSize() * sizeof(DOutDataType) +
|
| 185 |
+
in_n_c_hi_wi_device.mDesc.GetElementSize() * sizeof(DInDataType);
|
| 186 |
+
|
| 187 |
+
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
| 188 |
+
|
| 189 |
+
if(time_kernel)
|
| 190 |
+
{
|
| 191 |
+
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
|
| 192 |
+
<< inst_ptr->GetTypeString() << std::endl;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
if(avg_time < best_avg_time)
|
| 196 |
+
{
|
| 197 |
+
best_instance_name = inst_ptr->GetTypeString();
|
| 198 |
+
best_avg_time = avg_time;
|
| 199 |
+
best_gb_per_sec = gb_per_sec;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
if(do_verification)
|
| 203 |
+
{
|
| 204 |
+
din_device_buf.FromDevice(in_n_c_hi_wi_device.mData.data());
|
| 205 |
+
bool local_pass = ck::utils::check_err(in_n_c_hi_wi_device.mData,
|
| 206 |
+
in_n_c_hi_wi_host.mData,
|
| 207 |
+
"Error: Incorrect results",
|
| 208 |
+
1e-3,
|
| 209 |
+
1e-3);
|
| 210 |
+
|
| 211 |
+
if(do_log)
|
| 212 |
+
{
|
| 213 |
+
LogRangeAsType<float>(
|
| 214 |
+
std::cout << "in_n_c_hi_wi_device: ", in_n_c_hi_wi_device.mData, ",")
|
| 215 |
+
<< std::endl;
|
| 216 |
+
|
| 217 |
+
LogRangeAsType<float>(
|
| 218 |
+
std::cout << "in_n_c_hi_wi_host: ", in_n_c_hi_wi_host.mData, ",")
|
| 219 |
+
<< std::endl;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
if(!local_pass)
|
| 223 |
+
{
|
| 224 |
+
std::cout << inst_ptr->GetTypeString() << " failed verification: ";
|
| 225 |
+
LogRange(std::cout << "doutput lengths = [", out_length, ", ") << "]." << std::endl;
|
| 226 |
+
pass &= local_pass;
|
| 227 |
+
}
|
| 228 |
+
else
|
| 229 |
+
{
|
| 230 |
+
if(time_kernel)
|
| 231 |
+
{
|
| 232 |
+
std::cout << "pass" << std::endl;
|
| 233 |
+
}
|
| 234 |
+
}
|
| 235 |
+
}
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
if(time_kernel)
|
| 239 |
+
{
|
| 240 |
+
LogRange(std::cout << "length = ", out_length, ",") << std::endl;
|
| 241 |
+
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
|
| 242 |
+
<< best_instance_name << std::endl;
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
if(num_kernel == 0)
|
| 246 |
+
{
|
| 247 |
+
std::cout << "Error: No kernel is applicable" << std::endl;
|
| 248 |
+
return false;
|
| 249 |
+
}
|
| 250 |
+
|
| 251 |
+
return pass && instance_found;
|
| 252 |
+
}
|
| 253 |
+
|
| 254 |
+
} // namespace profiler
|
| 255 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_b_scale_impl.hpp
ADDED
|
@@ -0,0 +1,488 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/impl/device_batched_gemm_xdl_fpAintB_b_scale.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_b_scale.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 18 |
+
#include "ck/library/utility/check_err.hpp"
|
| 19 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 21 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 22 |
+
#include "ck/library/utility/literals.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename ADataType,
|
| 28 |
+
typename BDataType,
|
| 29 |
+
typename BScaleDataType,
|
| 30 |
+
typename ComputeDataType,
|
| 31 |
+
typename AccDataType,
|
| 32 |
+
typename CDataType,
|
| 33 |
+
index_t ScaleBlockK,
|
| 34 |
+
typename ALayout,
|
| 35 |
+
typename BLayout,
|
| 36 |
+
typename CLayout>
|
| 37 |
+
bool profile_batched_gemm_b_scale_impl(int do_verification,
|
| 38 |
+
int init_method,
|
| 39 |
+
bool do_log,
|
| 40 |
+
bool time_kernel,
|
| 41 |
+
int M,
|
| 42 |
+
int N,
|
| 43 |
+
int K,
|
| 44 |
+
int StrideA,
|
| 45 |
+
int StrideB,
|
| 46 |
+
int StrideC,
|
| 47 |
+
int BatchStrideA,
|
| 48 |
+
int BatchStrideB,
|
| 49 |
+
int BatchStrideC,
|
| 50 |
+
int BatchStrideScaleB,
|
| 51 |
+
int BatchSize,
|
| 52 |
+
int KBatch,
|
| 53 |
+
int n_warmup,
|
| 54 |
+
int n_iter,
|
| 55 |
+
uint64_t rotating = 0)
|
| 56 |
+
{
|
| 57 |
+
bool pass = true;
|
| 58 |
+
|
| 59 |
+
auto f_host_tensor_descriptor = [](std::size_t batch_count,
|
| 60 |
+
std::size_t row,
|
| 61 |
+
std::size_t col,
|
| 62 |
+
std::size_t stride,
|
| 63 |
+
std::size_t batch_stride,
|
| 64 |
+
auto layout) {
|
| 65 |
+
using namespace ck::literals;
|
| 66 |
+
|
| 67 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 68 |
+
{
|
| 69 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
|
| 70 |
+
}
|
| 71 |
+
else
|
| 72 |
+
{
|
| 73 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
|
| 74 |
+
}
|
| 75 |
+
};
|
| 76 |
+
|
| 77 |
+
ck::index_t Scale_Stride_BN = ck::is_same_v<BLayout, ck::tensor_layout::gemm::ColumnMajor>
|
| 78 |
+
? ((K + ScaleBlockK - 1) / ScaleBlockK)
|
| 79 |
+
: N;
|
| 80 |
+
|
| 81 |
+
Tensor<ADataType> a_g_m_k(
|
| 82 |
+
f_host_tensor_descriptor(BatchSize, M, K, StrideA, BatchStrideA, ALayout{}));
|
| 83 |
+
Tensor<BDataType> b_g_k_n(
|
| 84 |
+
f_host_tensor_descriptor(BatchSize, K, N, StrideB, BatchStrideB, BLayout{}));
|
| 85 |
+
Tensor<BDataType> b_g_k_n_permute(
|
| 86 |
+
f_host_tensor_descriptor(BatchSize, K, N, StrideB, BatchStrideB, BLayout{}));
|
| 87 |
+
Tensor<BScaleDataType> b1_g_k_n(f_host_tensor_descriptor(
|
| 88 |
+
BatchSize,
|
| 89 |
+
(K + ScaleBlockK - 1) / ScaleBlockK, // K direction group size is ScaleBlockK
|
| 90 |
+
N, // N direction group size is 1
|
| 91 |
+
Scale_Stride_BN,
|
| 92 |
+
BatchStrideScaleB,
|
| 93 |
+
BLayout{}));
|
| 94 |
+
Tensor<CDataType> c_g_m_n_host_result(
|
| 95 |
+
f_host_tensor_descriptor(BatchSize, M, N, StrideC, BatchStrideC, CLayout{}));
|
| 96 |
+
Tensor<CDataType> c_g_m_n_device_result(
|
| 97 |
+
f_host_tensor_descriptor(BatchSize, M, N, StrideC, BatchStrideC, CLayout{}));
|
| 98 |
+
|
| 99 |
+
int total_gemm_needed = a_g_m_k.GetElementSpaceSizeInBytes() +
|
| 100 |
+
b_g_k_n.GetElementSpaceSizeInBytes() +
|
| 101 |
+
b1_g_k_n.GetElementSpaceSizeInBytes();
|
| 102 |
+
|
| 103 |
+
int rotating_count = std::max(
|
| 104 |
+
1,
|
| 105 |
+
std::min(n_iter,
|
| 106 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 107 |
+
|
| 108 |
+
std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
|
| 109 |
+
std::cout << "b_g_k_n: " << b_g_k_n.mDesc << std::endl;
|
| 110 |
+
std::cout << "b1_g_k_n: " << b1_g_k_n.mDesc << std::endl;
|
| 111 |
+
std::cout << "c_g_m_n: " << c_g_m_n_device_result.mDesc << std::endl;
|
| 112 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 113 |
+
|
| 114 |
+
switch(init_method)
|
| 115 |
+
{
|
| 116 |
+
case 0: break;
|
| 117 |
+
case 1:
|
| 118 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-1, 2});
|
| 119 |
+
b_g_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-1, 2});
|
| 120 |
+
b1_g_k_n.GenerateTensorValue(GeneratorTensor_3<BScaleDataType>{0, 1.0});
|
| 121 |
+
break;
|
| 122 |
+
case 2:
|
| 123 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 124 |
+
b_g_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 125 |
+
b1_g_k_n.GenerateTensorValue(GeneratorTensor_3<BScaleDataType>{0, 1.0});
|
| 126 |
+
break;
|
| 127 |
+
default:
|
| 128 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 129 |
+
b_g_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
|
| 130 |
+
b1_g_k_n.GenerateTensorValue(GeneratorTensor_3<BScaleDataType>{0, 1.0});
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 134 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 135 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 136 |
+
|
| 137 |
+
const auto a_element_op = AElementOp{};
|
| 138 |
+
const auto b_element_op = BElementOp{};
|
| 139 |
+
const auto c_element_op = CElementOp{};
|
| 140 |
+
|
| 141 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSpaceSize());
|
| 142 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_g_k_n_permute.mDesc.GetElementSpaceSize());
|
| 143 |
+
DeviceMem b1_device_buf(sizeof(BScaleDataType) * b1_g_k_n.mDesc.GetElementSpaceSize());
|
| 144 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_g_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 145 |
+
|
| 146 |
+
a_device_buf.ToDevice(a_g_m_k.mData.data());
|
| 147 |
+
b1_device_buf.ToDevice(b1_g_k_n.mData.data());
|
| 148 |
+
|
| 149 |
+
using DeviceOp = ck::tensor_operation::device::DeviceBatchedGemmV2BScale<ALayout,
|
| 150 |
+
BLayout,
|
| 151 |
+
CLayout,
|
| 152 |
+
ADataType,
|
| 153 |
+
BDataType,
|
| 154 |
+
BScaleDataType,
|
| 155 |
+
CDataType,
|
| 156 |
+
1,
|
| 157 |
+
ScaleBlockK,
|
| 158 |
+
AElementOp,
|
| 159 |
+
BElementOp,
|
| 160 |
+
CElementOp>;
|
| 161 |
+
|
| 162 |
+
// get device op instances
|
| 163 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 164 |
+
DeviceOp>::GetInstances();
|
| 165 |
+
|
| 166 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 167 |
+
|
| 168 |
+
// Run reference GEMM
|
| 169 |
+
if(do_verification)
|
| 170 |
+
{
|
| 171 |
+
Tensor<float> b_g_k_n_dequant({K, N});
|
| 172 |
+
|
| 173 |
+
float v_b = 0;
|
| 174 |
+
for(int bs = 0; bs < BatchSize; bs++)
|
| 175 |
+
{
|
| 176 |
+
for(int n = 0; n < N; n++)
|
| 177 |
+
{
|
| 178 |
+
for(int k = 0; k < K; k++)
|
| 179 |
+
{
|
| 180 |
+
ck::pk_i4_t i4x2 = b_g_k_n(bs, k, n).data;
|
| 181 |
+
int8_t i4 = 0;
|
| 182 |
+
if(k % 2 == 1)
|
| 183 |
+
i4 = (i4x2.data >> 0) & 0xf;
|
| 184 |
+
else
|
| 185 |
+
i4 = (i4x2.data >> 4) & 0xf;
|
| 186 |
+
i4 = i4 - 8;
|
| 187 |
+
v_b = ck::type_convert<float>(i4);
|
| 188 |
+
|
| 189 |
+
b_g_k_n_dequant(bs, k, n) =
|
| 190 |
+
ck::type_convert<float>(v_b) *
|
| 191 |
+
ck::type_convert<float>(b1_g_k_n(bs, k / ScaleBlockK, n));
|
| 192 |
+
}
|
| 193 |
+
}
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 197 |
+
BDataType,
|
| 198 |
+
CDataType,
|
| 199 |
+
AccDataType,
|
| 200 |
+
AElementOp,
|
| 201 |
+
BElementOp,
|
| 202 |
+
CElementOp,
|
| 203 |
+
ComputeDataType>;
|
| 204 |
+
|
| 205 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 206 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 207 |
+
|
| 208 |
+
auto ref_argument = ref_gemm.MakeArgument(a_g_m_k,
|
| 209 |
+
b_g_k_n_dequant,
|
| 210 |
+
c_g_m_n_host_result,
|
| 211 |
+
a_element_op,
|
| 212 |
+
b_element_op,
|
| 213 |
+
c_element_op);
|
| 214 |
+
|
| 215 |
+
ref_invoker.Run(ref_argument);
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
std::string best_op_name;
|
| 219 |
+
float best_ave_time = 0;
|
| 220 |
+
float best_tflops = 0;
|
| 221 |
+
float best_gb_per_sec = 0;
|
| 222 |
+
float best_kbatch = 0;
|
| 223 |
+
|
| 224 |
+
// profile device GEMM instances
|
| 225 |
+
for(auto& op_ptr : op_ptrs)
|
| 226 |
+
{
|
| 227 |
+
const int KPerBlock = op_ptr->GetKPerBlock();
|
| 228 |
+
|
| 229 |
+
if(op_ptr->GetPermuteB())
|
| 230 |
+
{
|
| 231 |
+
int K1 = KPerBlock;
|
| 232 |
+
int K0 = K / KPerBlock;
|
| 233 |
+
|
| 234 |
+
// int K0, N, K1
|
| 235 |
+
for(int bs = 0; bs < BatchSize; bs++)
|
| 236 |
+
{
|
| 237 |
+
for(int j = 0; j < K0; j++)
|
| 238 |
+
{
|
| 239 |
+
for(int i = 0; i < N; i++)
|
| 240 |
+
{
|
| 241 |
+
for(int jj = 0; jj < K1; jj++)
|
| 242 |
+
{
|
| 243 |
+
b_g_k_n_permute(bs * BatchStrideB + j * N * K1 + i * K1 + jj) =
|
| 244 |
+
b_g_k_n(bs * BatchStrideB + i * K + (j * K1 + jj));
|
| 245 |
+
}
|
| 246 |
+
}
|
| 247 |
+
}
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
if(is_same_v<BDataType, pk_i4_t> && is_same_v<ADataType, half_t>)
|
| 251 |
+
{
|
| 252 |
+
// vector pk_i4x4 permute
|
| 253 |
+
for(int bs = 0; bs < BatchSize; bs++)
|
| 254 |
+
{
|
| 255 |
+
for(int i = 0; i < N; i++)
|
| 256 |
+
{
|
| 257 |
+
for(int j = 0; j < K; j += 8)
|
| 258 |
+
{
|
| 259 |
+
int input[8];
|
| 260 |
+
|
| 261 |
+
for(int k = 0; k < 4; k++)
|
| 262 |
+
{
|
| 263 |
+
int i4x2 = b_g_k_n_permute(bs, j + k * 2, i).data;
|
| 264 |
+
input[k * 2 + 0] = (i4x2 >> 4) & 0xf;
|
| 265 |
+
input[k * 2 + 1] = (i4x2 >> 0) & 0xf;
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
// permute 01234567->20643175
|
| 269 |
+
{
|
| 270 |
+
int hi = input[2];
|
| 271 |
+
int lo = input[0];
|
| 272 |
+
int i4x2 = (hi << 4) | lo;
|
| 273 |
+
|
| 274 |
+
b_g_k_n_permute(bs, j + 0, i) = i4x2;
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
{
|
| 278 |
+
int hi = input[6];
|
| 279 |
+
int lo = input[4];
|
| 280 |
+
int i4x2 = (hi << 4) | lo;
|
| 281 |
+
|
| 282 |
+
b_g_k_n_permute(bs, j + 2, i) = i4x2;
|
| 283 |
+
}
|
| 284 |
+
|
| 285 |
+
{
|
| 286 |
+
int hi = input[3];
|
| 287 |
+
int lo = input[1];
|
| 288 |
+
int i4x2 = (hi << 4) | lo;
|
| 289 |
+
|
| 290 |
+
b_g_k_n_permute(bs, j + 4, i) = i4x2;
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
{
|
| 294 |
+
int hi = input[7];
|
| 295 |
+
int lo = input[5];
|
| 296 |
+
int i4x2 = (hi << 4) | lo;
|
| 297 |
+
|
| 298 |
+
b_g_k_n_permute(bs, j + 6, i) = i4x2;
|
| 299 |
+
}
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
}
|
| 303 |
+
}
|
| 304 |
+
}
|
| 305 |
+
else
|
| 306 |
+
{
|
| 307 |
+
b_g_k_n_permute = b_g_k_n;
|
| 308 |
+
}
|
| 309 |
+
|
| 310 |
+
b_device_buf.ToDevice(b_g_k_n_permute.mData.data());
|
| 311 |
+
|
| 312 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 16, 19, 32, 38};
|
| 313 |
+
|
| 314 |
+
if(KBatch > 0)
|
| 315 |
+
{
|
| 316 |
+
kbatch_list = {KBatch};
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 320 |
+
{
|
| 321 |
+
auto kbatch_curr = kbatch_list[i];
|
| 322 |
+
|
| 323 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 324 |
+
static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 325 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 326 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 327 |
+
M,
|
| 328 |
+
N,
|
| 329 |
+
K,
|
| 330 |
+
StrideA,
|
| 331 |
+
StrideB,
|
| 332 |
+
StrideC,
|
| 333 |
+
Scale_Stride_BN,
|
| 334 |
+
BatchStrideA,
|
| 335 |
+
BatchStrideB,
|
| 336 |
+
BatchStrideC,
|
| 337 |
+
BatchStrideScaleB,
|
| 338 |
+
static_cast<BScaleDataType*>(b1_device_buf.GetDeviceBuffer()),
|
| 339 |
+
BatchSize, // Batch count
|
| 340 |
+
kbatch_curr, // Split K count
|
| 341 |
+
a_element_op,
|
| 342 |
+
b_element_op,
|
| 343 |
+
c_element_op);
|
| 344 |
+
|
| 345 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 346 |
+
|
| 347 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 348 |
+
{
|
| 349 |
+
|
| 350 |
+
// re-init C to zero before profiling next kernel
|
| 351 |
+
c_device_buf.SetZero();
|
| 352 |
+
|
| 353 |
+
// invoker_ptr->Run(argument_ptr.get(),
|
| 354 |
+
// StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 355 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false, 0});
|
| 356 |
+
|
| 357 |
+
if(do_verification)
|
| 358 |
+
{
|
| 359 |
+
c_device_buf.FromDevice(c_g_m_n_device_result.mData.data());
|
| 360 |
+
|
| 361 |
+
#if defined CK_ENABLE_FP8
|
| 362 |
+
// set softer tolerances for fp8
|
| 363 |
+
if constexpr(is_same_v<ADataType, f8_t> || is_same_v<BDataType, f8_t> ||
|
| 364 |
+
is_same_v<CDataType, f8_t>)
|
| 365 |
+
{
|
| 366 |
+
std::string msg = "Error: Incorrect results!";
|
| 367 |
+
double rtol = 1e-1;
|
| 368 |
+
double atol = 1e-1;
|
| 369 |
+
pass =
|
| 370 |
+
pass & ck::utils::check_err(
|
| 371 |
+
c_g_m_n_device_result, c_g_m_n_host_result, msg, rtol, atol);
|
| 372 |
+
}
|
| 373 |
+
else
|
| 374 |
+
{
|
| 375 |
+
#endif
|
| 376 |
+
pass =
|
| 377 |
+
pass & ck::utils::check_err(c_g_m_n_device_result, c_g_m_n_host_result);
|
| 378 |
+
#if defined CK_ENABLE_FP8
|
| 379 |
+
}
|
| 380 |
+
#endif
|
| 381 |
+
|
| 382 |
+
if(do_log)
|
| 383 |
+
{
|
| 384 |
+
LogRangeAsType<float>(std::cout << "a : ", a_g_m_k.mData, ",") << std::endl;
|
| 385 |
+
LogRangeAsType<float>(std::cout << "b: ", b_g_k_n.mData, ",") << std::endl;
|
| 386 |
+
LogRangeAsType<float>(
|
| 387 |
+
std::cout << "c_host : ", c_g_m_n_host_result.mData, ",")
|
| 388 |
+
<< std::endl;
|
| 389 |
+
LogRangeAsType<float>(
|
| 390 |
+
std::cout << "c_device: ", c_g_m_n_device_result.mData, ",")
|
| 391 |
+
<< std::endl;
|
| 392 |
+
}
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 396 |
+
|
| 397 |
+
float ave_time = invoker_ptr->Run(argument_ptr.get(),
|
| 398 |
+
StreamConfig{nullptr,
|
| 399 |
+
time_kernel,
|
| 400 |
+
0,
|
| 401 |
+
n_warmup,
|
| 402 |
+
n_iter,
|
| 403 |
+
rotating_count > 1,
|
| 404 |
+
rotating_count});
|
| 405 |
+
|
| 406 |
+
std::size_t flop = std::size_t(2) * M * N * K * BatchSize;
|
| 407 |
+
|
| 408 |
+
static constexpr index_t BPackedSize = []() {
|
| 409 |
+
if constexpr(is_same_v<remove_cvref_t<BDataType>, pk_i4_t>)
|
| 410 |
+
return 2;
|
| 411 |
+
else
|
| 412 |
+
return 1;
|
| 413 |
+
}();
|
| 414 |
+
|
| 415 |
+
std::size_t num_btype = sizeof(ADataType) * M * K +
|
| 416 |
+
sizeof(BDataType) * K * N / BPackedSize +
|
| 417 |
+
sizeof(CDataType) * M * N;
|
| 418 |
+
|
| 419 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 420 |
+
|
| 421 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 422 |
+
|
| 423 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 424 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", KBatch "
|
| 425 |
+
<< kbatch_curr << std::endl;
|
| 426 |
+
|
| 427 |
+
if(tflops > best_tflops && ave_time > 1e-10)
|
| 428 |
+
{
|
| 429 |
+
best_op_name = op_name;
|
| 430 |
+
best_tflops = tflops;
|
| 431 |
+
best_ave_time = ave_time;
|
| 432 |
+
best_gb_per_sec = gb_per_sec;
|
| 433 |
+
best_kbatch = kbatch_curr;
|
| 434 |
+
}
|
| 435 |
+
}
|
| 436 |
+
else
|
| 437 |
+
{
|
| 438 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 439 |
+
<< std::endl;
|
| 440 |
+
}
|
| 441 |
+
}
|
| 442 |
+
}
|
| 443 |
+
|
| 444 |
+
if constexpr(is_same<CDataType, float>::value)
|
| 445 |
+
{
|
| 446 |
+
std::cout << "Best Perf for datatype = f32";
|
| 447 |
+
}
|
| 448 |
+
else if constexpr(is_same<CDataType, half_t>::value)
|
| 449 |
+
{
|
| 450 |
+
std::cout << "Best Perf for datatype = f16";
|
| 451 |
+
}
|
| 452 |
+
else if constexpr(is_same<CDataType, bhalf_t>::value)
|
| 453 |
+
{
|
| 454 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 455 |
+
}
|
| 456 |
+
else if constexpr(is_same<CDataType, int8_t>::value)
|
| 457 |
+
{
|
| 458 |
+
std::cout << "Best Perf for datatype = int8";
|
| 459 |
+
}
|
| 460 |
+
|
| 461 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 462 |
+
{
|
| 463 |
+
std::cout << " ALayout = RowMajor";
|
| 464 |
+
}
|
| 465 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 466 |
+
{
|
| 467 |
+
std::cout << " ALayout = ColumnMajor";
|
| 468 |
+
}
|
| 469 |
+
|
| 470 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 471 |
+
{
|
| 472 |
+
std::cout << " BLayout = RowMajor";
|
| 473 |
+
}
|
| 474 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 475 |
+
{
|
| 476 |
+
std::cout << " BLayout = ColumnMajor";
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 480 |
+
<< " StrideB = " << StrideB << " StrideC = " << StrideC << " KBatch = " << best_kbatch
|
| 481 |
+
<< " : " << best_ave_time << " ms, " << best_tflops << " TFlops, " << best_gb_per_sec
|
| 482 |
+
<< " GB/s, " << best_op_name << std::endl;
|
| 483 |
+
|
| 484 |
+
return pass;
|
| 485 |
+
}
|
| 486 |
+
|
| 487 |
+
} // namespace profiler
|
| 488 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_gemm_impl.hpp
ADDED
|
@@ -0,0 +1,319 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <memory>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_batched_gemm_gemm.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_gemm.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
|
| 21 |
+
|
| 22 |
+
namespace ck {
|
| 23 |
+
namespace profiler {
|
| 24 |
+
|
| 25 |
+
template <typename ADataType,
|
| 26 |
+
typename B0DataType,
|
| 27 |
+
typename B1DataType,
|
| 28 |
+
typename CDataType,
|
| 29 |
+
typename ALayout,
|
| 30 |
+
typename B0Layout,
|
| 31 |
+
typename B1Layout,
|
| 32 |
+
typename CLayout>
|
| 33 |
+
bool profile_batched_gemm_gemm_impl(bool do_verification,
|
| 34 |
+
int init_method,
|
| 35 |
+
bool do_log,
|
| 36 |
+
bool time_kernel,
|
| 37 |
+
int M,
|
| 38 |
+
int N,
|
| 39 |
+
int K,
|
| 40 |
+
int O,
|
| 41 |
+
int BatchCount = 1,
|
| 42 |
+
int StrideA = -1,
|
| 43 |
+
int StrideB0 = -1,
|
| 44 |
+
int StrideB1 = -1,
|
| 45 |
+
int StrideC = -1,
|
| 46 |
+
int BatchStrideA = -1,
|
| 47 |
+
int BatchStrideB0 = -1,
|
| 48 |
+
int BatchStrideB1 = -1,
|
| 49 |
+
int BatchStrideC = -1)
|
| 50 |
+
|
| 51 |
+
{
|
| 52 |
+
|
| 53 |
+
using Row = tensor_layout::gemm::RowMajor;
|
| 54 |
+
using Col = tensor_layout::gemm::ColumnMajor;
|
| 55 |
+
using PassThrough = tensor_operation::element_wise::PassThrough;
|
| 56 |
+
using AElementOp = PassThrough;
|
| 57 |
+
using B0ElementOp = PassThrough;
|
| 58 |
+
using B1ElementOp = PassThrough;
|
| 59 |
+
using Acc0ElementOp = PassThrough;
|
| 60 |
+
using CElementOp = PassThrough;
|
| 61 |
+
using AccDataType = float;
|
| 62 |
+
|
| 63 |
+
// Ref Gemm0
|
| 64 |
+
using ReferenceGemm0Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 65 |
+
B0DataType,
|
| 66 |
+
ADataType,
|
| 67 |
+
AccDataType,
|
| 68 |
+
AElementOp,
|
| 69 |
+
B0ElementOp,
|
| 70 |
+
CElementOp>;
|
| 71 |
+
|
| 72 |
+
// Ref Gemm
|
| 73 |
+
using ReferenceGemm1Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 74 |
+
B1DataType,
|
| 75 |
+
CDataType,
|
| 76 |
+
AccDataType,
|
| 77 |
+
AElementOp,
|
| 78 |
+
B1ElementOp,
|
| 79 |
+
CElementOp>;
|
| 80 |
+
|
| 81 |
+
bool pass = true;
|
| 82 |
+
|
| 83 |
+
const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
|
| 84 |
+
const int DefaultStrideB0 = ck::is_same_v<B0Layout, Row> ? N : K;
|
| 85 |
+
const int DefaultStrideB1 = ck::is_same_v<B1Layout, Row> ? O : N;
|
| 86 |
+
const int DefaultStrideC = ck::is_same_v<CLayout, Row> ? O : M;
|
| 87 |
+
|
| 88 |
+
StrideA = (StrideA < 0) ? DefaultStrideA : StrideA;
|
| 89 |
+
StrideB0 = (StrideB0 < 0) ? DefaultStrideB0 : StrideB0;
|
| 90 |
+
StrideB1 = (StrideB1 < 0) ? DefaultStrideB1 : StrideB1;
|
| 91 |
+
StrideC = (StrideC < 0) ? DefaultStrideC : StrideC;
|
| 92 |
+
|
| 93 |
+
const int DefaultBatchStrideA = (ck::is_same_v<ALayout, Col> ? K : M) * StrideA;
|
| 94 |
+
const int DefaultBatchStrideB0 = (ck::is_same_v<B0Layout, Col> ? N : K) * StrideB0;
|
| 95 |
+
const int DefaultBatchStrideB1 = (ck::is_same_v<B1Layout, Col> ? O : N) * StrideB1;
|
| 96 |
+
const int DefaultBatchStrideC = (ck::is_same_v<CLayout, Col> ? O : M) * StrideC;
|
| 97 |
+
|
| 98 |
+
BatchStrideA = BatchStrideA < 0 ? DefaultBatchStrideA : BatchStrideA;
|
| 99 |
+
BatchStrideB0 = BatchStrideB0 < 0 ? DefaultBatchStrideB0 : BatchStrideB0;
|
| 100 |
+
BatchStrideB1 = BatchStrideB1 < 0 ? DefaultBatchStrideB1 : BatchStrideB1;
|
| 101 |
+
BatchStrideC = BatchStrideC < 0 ? DefaultBatchStrideC : BatchStrideC;
|
| 102 |
+
|
| 103 |
+
auto f_host_tensor_descriptor = [](std::size_t batch_count,
|
| 104 |
+
std::size_t row,
|
| 105 |
+
std::size_t col,
|
| 106 |
+
std::size_t stride,
|
| 107 |
+
std::size_t batch_stride,
|
| 108 |
+
auto layout) {
|
| 109 |
+
using namespace ck::literals;
|
| 110 |
+
|
| 111 |
+
if(std::is_same<decltype(layout), Row>::value)
|
| 112 |
+
{
|
| 113 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
|
| 114 |
+
}
|
| 115 |
+
else
|
| 116 |
+
{
|
| 117 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
|
| 118 |
+
}
|
| 119 |
+
};
|
| 120 |
+
|
| 121 |
+
// C_m_o = A_m_k * B0_k_n * B1_n_o
|
| 122 |
+
Tensor<ADataType> a_g_m_k(
|
| 123 |
+
f_host_tensor_descriptor(BatchCount, M, K, StrideA, BatchStrideA, ALayout{}));
|
| 124 |
+
Tensor<B0DataType> b0_g_k_n(
|
| 125 |
+
f_host_tensor_descriptor(BatchCount, K, N, StrideB0, BatchStrideB0, B0Layout{}));
|
| 126 |
+
Tensor<B1DataType> b1_g_n_o(
|
| 127 |
+
f_host_tensor_descriptor(BatchCount, N, O, StrideB1, BatchStrideB1, B1Layout{}));
|
| 128 |
+
Tensor<CDataType> c_g_m_o_host_result(
|
| 129 |
+
f_host_tensor_descriptor(BatchCount, M, O, StrideC, BatchStrideC, CLayout{}));
|
| 130 |
+
Tensor<CDataType> c_g_m_o_device_result(
|
| 131 |
+
f_host_tensor_descriptor(BatchCount, M, O, StrideC, BatchStrideC, CLayout{}));
|
| 132 |
+
// Host verification: Output of Gemm0 is input A of Gemm1
|
| 133 |
+
Tensor<ADataType> acc0_g_m_n(f_host_tensor_descriptor(BatchCount, M, N, N, M * N, Row{}));
|
| 134 |
+
|
| 135 |
+
std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
|
| 136 |
+
std::cout << "b0_g_k_n: " << b0_g_k_n.mDesc << std::endl;
|
| 137 |
+
std::cout << "b1_g_n_o: " << b1_g_n_o.mDesc << std::endl;
|
| 138 |
+
std::cout << "c_g_m_o: " << c_g_m_o_host_result.mDesc << std::endl;
|
| 139 |
+
|
| 140 |
+
switch(init_method)
|
| 141 |
+
{
|
| 142 |
+
case 0: break;
|
| 143 |
+
case 1:
|
| 144 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 3});
|
| 145 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 3});
|
| 146 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-2, 3});
|
| 147 |
+
break;
|
| 148 |
+
case 2:
|
| 149 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 150 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_3<B0DataType>{0.0, 1.0});
|
| 151 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_3<B1DataType>{-0.5, 0.5});
|
| 152 |
+
break;
|
| 153 |
+
case 3:
|
| 154 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
| 155 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_Diagonal<B0DataType>{});
|
| 156 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
|
| 157 |
+
break;
|
| 158 |
+
default:
|
| 159 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
|
| 160 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_Sequential<B0DataType, 1>{});
|
| 161 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
DeviceMem a_g_m_k_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSize());
|
| 165 |
+
DeviceMem b0_g_k_n_device_buf(sizeof(B0DataType) * b0_g_k_n.mDesc.GetElementSize());
|
| 166 |
+
DeviceMem b1_g_n_o_device_buf(sizeof(B1DataType) * b1_g_n_o.mDesc.GetElementSize());
|
| 167 |
+
DeviceMem c_g_m_o_device_buf(sizeof(CDataType) * c_g_m_o_device_result.mDesc.GetElementSize());
|
| 168 |
+
|
| 169 |
+
a_g_m_k_device_buf.ToDevice(a_g_m_k.mData.data());
|
| 170 |
+
b0_g_k_n_device_buf.ToDevice(b0_g_k_n.mData.data());
|
| 171 |
+
b1_g_n_o_device_buf.ToDevice(b1_g_n_o.mData.data());
|
| 172 |
+
|
| 173 |
+
auto a_element_op = AElementOp{};
|
| 174 |
+
auto b0_element_op = B0ElementOp{};
|
| 175 |
+
auto acc0_element_op = Acc0ElementOp{};
|
| 176 |
+
auto b1_element_op = B1ElementOp{};
|
| 177 |
+
auto c_element_op = CElementOp{};
|
| 178 |
+
|
| 179 |
+
using DeviceOp = tensor_operation::device::DeviceBatchedGemmGemm<ALayout,
|
| 180 |
+
B0Layout,
|
| 181 |
+
B1Layout,
|
| 182 |
+
CLayout,
|
| 183 |
+
ADataType,
|
| 184 |
+
B0DataType,
|
| 185 |
+
B1DataType,
|
| 186 |
+
CDataType,
|
| 187 |
+
AElementOp,
|
| 188 |
+
B0ElementOp,
|
| 189 |
+
Acc0ElementOp,
|
| 190 |
+
B1ElementOp,
|
| 191 |
+
CElementOp>;
|
| 192 |
+
|
| 193 |
+
// get device op instances
|
| 194 |
+
const auto op_ptrs = tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 195 |
+
DeviceOp>::GetInstances();
|
| 196 |
+
|
| 197 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 198 |
+
|
| 199 |
+
// early fail when no instances are found
|
| 200 |
+
if(op_ptrs.size() == 0)
|
| 201 |
+
{
|
| 202 |
+
return false;
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
if(do_verification)
|
| 206 |
+
{
|
| 207 |
+
auto ref_gemm0 = ReferenceGemm0Instance{};
|
| 208 |
+
auto ref_gemm0_invoker = ref_gemm0.MakeInvoker();
|
| 209 |
+
auto ref_gemm0_argument = ref_gemm0.MakeArgument(
|
| 210 |
+
a_g_m_k, b0_g_k_n, acc0_g_m_n, a_element_op, b0_element_op, PassThrough{});
|
| 211 |
+
|
| 212 |
+
ref_gemm0_invoker.Run(ref_gemm0_argument);
|
| 213 |
+
|
| 214 |
+
auto ref_gemm1 = ReferenceGemm1Instance{};
|
| 215 |
+
auto ref_gemm1_invoker = ref_gemm1.MakeInvoker();
|
| 216 |
+
auto ref_gemm1_argument = ref_gemm1.MakeArgument(
|
| 217 |
+
acc0_g_m_n, b1_g_n_o, c_g_m_o_host_result, PassThrough{}, b1_element_op, c_element_op);
|
| 218 |
+
|
| 219 |
+
ref_gemm1_invoker.Run(ref_gemm1_argument);
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
std::string best_op_name;
|
| 223 |
+
float best_ave_time = 0;
|
| 224 |
+
float best_tflops = 0;
|
| 225 |
+
float best_gb_per_sec = 0;
|
| 226 |
+
|
| 227 |
+
// profile device op instances
|
| 228 |
+
for(auto& op_ptr : op_ptrs)
|
| 229 |
+
{
|
| 230 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 231 |
+
static_cast<ADataType*>(a_g_m_k_device_buf.GetDeviceBuffer()),
|
| 232 |
+
static_cast<B0DataType*>(b0_g_k_n_device_buf.GetDeviceBuffer()),
|
| 233 |
+
static_cast<B1DataType*>(b1_g_n_o_device_buf.GetDeviceBuffer()),
|
| 234 |
+
static_cast<CDataType*>(c_g_m_o_device_buf.GetDeviceBuffer()),
|
| 235 |
+
M,
|
| 236 |
+
N,
|
| 237 |
+
K,
|
| 238 |
+
O,
|
| 239 |
+
BatchCount,
|
| 240 |
+
StrideA,
|
| 241 |
+
StrideB0,
|
| 242 |
+
StrideB1,
|
| 243 |
+
StrideC,
|
| 244 |
+
BatchStrideA,
|
| 245 |
+
BatchStrideB0,
|
| 246 |
+
BatchStrideB1,
|
| 247 |
+
BatchStrideC,
|
| 248 |
+
a_element_op,
|
| 249 |
+
b0_element_op,
|
| 250 |
+
acc0_element_op,
|
| 251 |
+
b1_element_op,
|
| 252 |
+
c_element_op);
|
| 253 |
+
|
| 254 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 255 |
+
|
| 256 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 257 |
+
{
|
| 258 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 259 |
+
|
| 260 |
+
float ave_time =
|
| 261 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 262 |
+
|
| 263 |
+
std::size_t flop = (size_t(M) * N * K * 2 + size_t(M) * N * O * 2) * BatchCount;
|
| 264 |
+
std::size_t num_btype = (sizeof(ADataType) * M * K + sizeof(B0DataType) * K * N +
|
| 265 |
+
sizeof(B1DataType) * N * O + sizeof(CDataType) * M * O) *
|
| 266 |
+
BatchCount;
|
| 267 |
+
|
| 268 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 269 |
+
|
| 270 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 271 |
+
|
| 272 |
+
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 273 |
+
<< " GB/s, " << op_name << std::endl;
|
| 274 |
+
|
| 275 |
+
if(tflops > best_tflops)
|
| 276 |
+
{
|
| 277 |
+
best_op_name = op_name;
|
| 278 |
+
best_tflops = tflops;
|
| 279 |
+
best_ave_time = ave_time;
|
| 280 |
+
best_gb_per_sec = gb_per_sec;
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
if(do_verification)
|
| 284 |
+
{
|
| 285 |
+
c_g_m_o_device_buf.FromDevice(c_g_m_o_device_result.mData.data());
|
| 286 |
+
|
| 287 |
+
pass = pass & ck::utils::check_err(c_g_m_o_device_result, c_g_m_o_host_result);
|
| 288 |
+
|
| 289 |
+
if(do_log)
|
| 290 |
+
{
|
| 291 |
+
LogRangeAsType<float>(std::cout << "a_g_m_k: ", a_g_m_k.mData, ",")
|
| 292 |
+
<< std::endl;
|
| 293 |
+
LogRangeAsType<float>(std::cout << "b0_g_k_n : ", b0_g_k_n.mData, ",")
|
| 294 |
+
<< std::endl;
|
| 295 |
+
LogRangeAsType<float>(std::cout << "b1_g_n_o : ", b1_g_n_o.mData, ",")
|
| 296 |
+
<< std::endl;
|
| 297 |
+
LogRangeAsType<float>(
|
| 298 |
+
std::cout << "c_g_m_o_host_result : ", c_g_m_o_host_result.mData, ",")
|
| 299 |
+
<< std::endl;
|
| 300 |
+
LogRangeAsType<float>(
|
| 301 |
+
std::cout << "c_g_m_o_device_result : ", c_g_m_o_device_result.mData, ",")
|
| 302 |
+
<< std::endl;
|
| 303 |
+
}
|
| 304 |
+
}
|
| 305 |
+
}
|
| 306 |
+
else
|
| 307 |
+
{
|
| 308 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 309 |
+
}
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 313 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 314 |
+
|
| 315 |
+
return pass;
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
} // namespace profiler
|
| 319 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_softmax_gemm_impl.hpp
ADDED
|
@@ -0,0 +1,347 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <memory>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_batched_gemm_softmax_gemm.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_softmax_gemm.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
|
| 21 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
|
| 22 |
+
|
| 23 |
+
namespace ck {
|
| 24 |
+
namespace profiler {
|
| 25 |
+
|
| 26 |
+
template <typename ADataType,
|
| 27 |
+
typename B0DataType,
|
| 28 |
+
typename B1DataType,
|
| 29 |
+
typename CDataType,
|
| 30 |
+
typename ALayout,
|
| 31 |
+
typename B0Layout,
|
| 32 |
+
typename B1Layout,
|
| 33 |
+
typename CLayout,
|
| 34 |
+
bool MaskOutUpperTriangle>
|
| 35 |
+
bool profile_batched_gemm_softmax_gemm_impl(bool do_verification,
|
| 36 |
+
int init_method,
|
| 37 |
+
bool do_log,
|
| 38 |
+
bool time_kernel,
|
| 39 |
+
int M,
|
| 40 |
+
int N,
|
| 41 |
+
int K,
|
| 42 |
+
int O,
|
| 43 |
+
int BatchCount = 1,
|
| 44 |
+
int StrideA = -1,
|
| 45 |
+
int StrideB0 = -1,
|
| 46 |
+
int StrideB1 = -1,
|
| 47 |
+
int StrideC = -1,
|
| 48 |
+
int BatchStrideA = -1,
|
| 49 |
+
int BatchStrideB0 = -1,
|
| 50 |
+
int BatchStrideB1 = -1,
|
| 51 |
+
int BatchStrideC = -1,
|
| 52 |
+
float alpha = -1.f)
|
| 53 |
+
|
| 54 |
+
{
|
| 55 |
+
|
| 56 |
+
using Row = tensor_layout::gemm::RowMajor;
|
| 57 |
+
using Col = tensor_layout::gemm::ColumnMajor;
|
| 58 |
+
using PassThrough = tensor_operation::element_wise::PassThrough;
|
| 59 |
+
using Scale = tensor_operation::element_wise::Scale;
|
| 60 |
+
using AElementOp = PassThrough;
|
| 61 |
+
using B0ElementOp = PassThrough;
|
| 62 |
+
using Acc0ElementOp = Scale;
|
| 63 |
+
using B1ElementOp = PassThrough;
|
| 64 |
+
using CElementOp = PassThrough;
|
| 65 |
+
using AccDataType = float;
|
| 66 |
+
|
| 67 |
+
// Ref Gemm0: various type in, fp32 out
|
| 68 |
+
using ReferenceGemm0Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 69 |
+
B0DataType,
|
| 70 |
+
AccDataType,
|
| 71 |
+
AccDataType,
|
| 72 |
+
AElementOp,
|
| 73 |
+
B0ElementOp,
|
| 74 |
+
Acc0ElementOp>;
|
| 75 |
+
|
| 76 |
+
// Ref Softmax: fp32 in, various type out
|
| 77 |
+
using ReferenceSoftmaxInstance =
|
| 78 |
+
tensor_operation::host::ReferenceSoftmax<AccDataType, ADataType, AccDataType>;
|
| 79 |
+
|
| 80 |
+
// Ref Gemm1: various type in, various type out
|
| 81 |
+
using ReferenceGemm1Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 82 |
+
B1DataType,
|
| 83 |
+
CDataType,
|
| 84 |
+
AccDataType,
|
| 85 |
+
AElementOp,
|
| 86 |
+
B1ElementOp,
|
| 87 |
+
CElementOp>;
|
| 88 |
+
|
| 89 |
+
bool pass = true;
|
| 90 |
+
|
| 91 |
+
const int DefaultStrideA = ck::is_same_v<ALayout, Row> ? K : M;
|
| 92 |
+
const int DefaultStrideB0 = ck::is_same_v<B0Layout, Row> ? N : K;
|
| 93 |
+
const int DefaultStrideB1 = ck::is_same_v<B1Layout, Row> ? O : N;
|
| 94 |
+
const int DefaultStrideC = ck::is_same_v<CLayout, Row> ? O : M;
|
| 95 |
+
|
| 96 |
+
StrideA = (StrideA < 0) ? DefaultStrideA : StrideA;
|
| 97 |
+
StrideB0 = (StrideB0 < 0) ? DefaultStrideB0 : StrideB0;
|
| 98 |
+
StrideB1 = (StrideB1 < 0) ? DefaultStrideB1 : StrideB1;
|
| 99 |
+
StrideC = (StrideC < 0) ? DefaultStrideC : StrideC;
|
| 100 |
+
|
| 101 |
+
const int DefaultBatchStrideA = (ck::is_same_v<ALayout, Col> ? K : M) * StrideA;
|
| 102 |
+
const int DefaultBatchStrideB0 = (ck::is_same_v<B0Layout, Col> ? N : K) * StrideB0;
|
| 103 |
+
const int DefaultBatchStrideB1 = (ck::is_same_v<B1Layout, Col> ? O : N) * StrideB1;
|
| 104 |
+
const int DefaultBatchStrideC = (ck::is_same_v<CLayout, Col> ? O : M) * StrideC;
|
| 105 |
+
|
| 106 |
+
BatchStrideA = BatchStrideA < 0 ? DefaultBatchStrideA : BatchStrideA;
|
| 107 |
+
BatchStrideB0 = BatchStrideB0 < 0 ? DefaultBatchStrideB0 : BatchStrideB0;
|
| 108 |
+
BatchStrideB1 = BatchStrideB1 < 0 ? DefaultBatchStrideB1 : BatchStrideB1;
|
| 109 |
+
BatchStrideC = BatchStrideC < 0 ? DefaultBatchStrideC : BatchStrideC;
|
| 110 |
+
|
| 111 |
+
auto f_host_tensor_descriptor = [](std::size_t batch_count,
|
| 112 |
+
std::size_t row,
|
| 113 |
+
std::size_t col,
|
| 114 |
+
std::size_t stride,
|
| 115 |
+
std::size_t batch_stride,
|
| 116 |
+
auto layout) {
|
| 117 |
+
using namespace ck::literals;
|
| 118 |
+
|
| 119 |
+
if(std::is_same<decltype(layout), Row>::value)
|
| 120 |
+
{
|
| 121 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
|
| 122 |
+
}
|
| 123 |
+
else
|
| 124 |
+
{
|
| 125 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
|
| 126 |
+
}
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
// C_m_o = A_m_k * B0_k_n * B1_n_o
|
| 130 |
+
Tensor<ADataType> a_g_m_k(
|
| 131 |
+
f_host_tensor_descriptor(BatchCount, M, K, StrideA, BatchStrideA, ALayout{}));
|
| 132 |
+
Tensor<B0DataType> b0_g_k_n(
|
| 133 |
+
f_host_tensor_descriptor(BatchCount, K, N, StrideB0, BatchStrideB0, B0Layout{}));
|
| 134 |
+
Tensor<B1DataType> b1_g_n_o(
|
| 135 |
+
f_host_tensor_descriptor(BatchCount, N, O, StrideB1, BatchStrideB1, B1Layout{}));
|
| 136 |
+
Tensor<CDataType> c_g_m_o_host_result(
|
| 137 |
+
f_host_tensor_descriptor(BatchCount, M, O, StrideC, BatchStrideC, CLayout{}));
|
| 138 |
+
Tensor<CDataType> c_g_m_o_device_result(
|
| 139 |
+
f_host_tensor_descriptor(BatchCount, M, O, StrideC, BatchStrideC, CLayout{}));
|
| 140 |
+
// Host verification: Output of Gemm0 is input A of Gemm1
|
| 141 |
+
Tensor<AccDataType> acc0_g_m_n(f_host_tensor_descriptor(BatchCount, M, N, N, M * N, Row{}));
|
| 142 |
+
Tensor<ADataType> a1_g_m_n(f_host_tensor_descriptor(BatchCount, M, N, N, M * N, Row{}));
|
| 143 |
+
|
| 144 |
+
std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
|
| 145 |
+
std::cout << "b0_g_k_n: " << b0_g_k_n.mDesc << std::endl;
|
| 146 |
+
std::cout << "b1_g_n_o: " << b1_g_n_o.mDesc << std::endl;
|
| 147 |
+
std::cout << "c_g_m_o: " << c_g_m_o_host_result.mDesc << std::endl;
|
| 148 |
+
|
| 149 |
+
std::srand(1); // work around test flakiness
|
| 150 |
+
switch(init_method)
|
| 151 |
+
{
|
| 152 |
+
case 0: break;
|
| 153 |
+
case 1:
|
| 154 |
+
// Still unsure whether this kind of deterministic floating point accurary issue is expected
|
| 155 |
+
// or not. May want to try exact same approach as the GPU kernel in the host reference
|
| 156 |
+
// GEMM+Softmax+GEMM function to see if the accuracy discrepancy goes away. Until then,
|
| 157 |
+
// shrink the input value range as it is less likely to produce errors of around ~1e-3.
|
| 158 |
+
// a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 159 |
+
// b0_g_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
|
| 160 |
+
// b1_g_n_o.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-5, 5});
|
| 161 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
| 162 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 2});
|
| 163 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-2, 2});
|
| 164 |
+
break;
|
| 165 |
+
case 2:
|
| 166 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 167 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_3<B0DataType>{0.0, 1.0});
|
| 168 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_3<B1DataType>{-0.5, 0.5});
|
| 169 |
+
break;
|
| 170 |
+
case 3:
|
| 171 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
| 172 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_Diagonal<B0DataType>{});
|
| 173 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
|
| 174 |
+
break;
|
| 175 |
+
default:
|
| 176 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
|
| 177 |
+
b0_g_k_n.GenerateTensorValue(GeneratorTensor_Sequential<B0DataType, 1>{});
|
| 178 |
+
b1_g_n_o.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
DeviceMem a_g_m_k_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSize());
|
| 182 |
+
DeviceMem b0_g_k_n_device_buf(sizeof(B0DataType) * b0_g_k_n.mDesc.GetElementSize());
|
| 183 |
+
DeviceMem b1_g_n_o_device_buf(sizeof(B1DataType) * b1_g_n_o.mDesc.GetElementSize());
|
| 184 |
+
DeviceMem c_g_m_o_device_buf(sizeof(CDataType) * c_g_m_o_device_result.mDesc.GetElementSize());
|
| 185 |
+
|
| 186 |
+
a_g_m_k_device_buf.ToDevice(a_g_m_k.mData.data());
|
| 187 |
+
b0_g_k_n_device_buf.ToDevice(b0_g_k_n.mData.data());
|
| 188 |
+
b1_g_n_o_device_buf.ToDevice(b1_g_n_o.mData.data());
|
| 189 |
+
|
| 190 |
+
if(alpha < 0)
|
| 191 |
+
{
|
| 192 |
+
alpha = 1.f / std::sqrt(K); // usually 1 / sqrt(head_dim)
|
| 193 |
+
}
|
| 194 |
+
auto a_element_op = AElementOp{};
|
| 195 |
+
auto b0_element_op = B0ElementOp{};
|
| 196 |
+
auto acc0_element_op = Acc0ElementOp{alpha};
|
| 197 |
+
auto b1_element_op = B1ElementOp{};
|
| 198 |
+
auto c_element_op = CElementOp{};
|
| 199 |
+
|
| 200 |
+
using DeviceOp = tensor_operation::device::DeviceBatchedGemmSoftmaxGemm<ALayout,
|
| 201 |
+
B0Layout,
|
| 202 |
+
B1Layout,
|
| 203 |
+
CLayout,
|
| 204 |
+
ADataType,
|
| 205 |
+
B0DataType,
|
| 206 |
+
B1DataType,
|
| 207 |
+
CDataType,
|
| 208 |
+
AElementOp,
|
| 209 |
+
B0ElementOp,
|
| 210 |
+
Acc0ElementOp,
|
| 211 |
+
B1ElementOp,
|
| 212 |
+
CElementOp,
|
| 213 |
+
MaskOutUpperTriangle>;
|
| 214 |
+
|
| 215 |
+
// get device op instances
|
| 216 |
+
const auto op_ptrs = tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 217 |
+
DeviceOp>::GetInstances();
|
| 218 |
+
|
| 219 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 220 |
+
|
| 221 |
+
if(do_verification)
|
| 222 |
+
{
|
| 223 |
+
auto ref_gemm0 = ReferenceGemm0Instance{};
|
| 224 |
+
auto ref_gemm0_invoker = ref_gemm0.MakeInvoker();
|
| 225 |
+
auto ref_gemm0_argument = ref_gemm0.MakeArgument(
|
| 226 |
+
a_g_m_k, b0_g_k_n, acc0_g_m_n, a_element_op, b0_element_op, Scale{alpha});
|
| 227 |
+
|
| 228 |
+
ref_gemm0_invoker.Run(ref_gemm0_argument);
|
| 229 |
+
|
| 230 |
+
// mask out upper triangle
|
| 231 |
+
acc0_g_m_n.ForEach([&](auto& self, auto idx) {
|
| 232 |
+
if(MaskOutUpperTriangle && idx[1] < idx[2])
|
| 233 |
+
self(idx) = -ck::NumericLimits<float>::Infinity();
|
| 234 |
+
});
|
| 235 |
+
|
| 236 |
+
auto ref_softmax = ReferenceSoftmaxInstance{};
|
| 237 |
+
auto ref_softmax_invoker = ref_softmax.MakeInvoker();
|
| 238 |
+
auto ref_softmax_argument = ref_softmax.MakeArgument(acc0_g_m_n, a1_g_m_n, 1, 0, {2});
|
| 239 |
+
|
| 240 |
+
ref_softmax_invoker.Run(ref_softmax_argument);
|
| 241 |
+
|
| 242 |
+
auto ref_gemm1 = ReferenceGemm1Instance{};
|
| 243 |
+
auto ref_gemm1_invoker = ref_gemm1.MakeInvoker();
|
| 244 |
+
auto ref_gemm1_argument = ref_gemm1.MakeArgument(
|
| 245 |
+
a1_g_m_n, b1_g_n_o, c_g_m_o_host_result, PassThrough{}, b1_element_op, c_element_op);
|
| 246 |
+
|
| 247 |
+
ref_gemm1_invoker.Run(ref_gemm1_argument);
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
std::string best_op_name;
|
| 251 |
+
float best_ave_time = 0;
|
| 252 |
+
float best_tflops = 0;
|
| 253 |
+
float best_gb_per_sec = 0;
|
| 254 |
+
|
| 255 |
+
// profile device op instances
|
| 256 |
+
for(auto& op_ptr : op_ptrs)
|
| 257 |
+
{
|
| 258 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 259 |
+
static_cast<ADataType*>(a_g_m_k_device_buf.GetDeviceBuffer()),
|
| 260 |
+
static_cast<B0DataType*>(b0_g_k_n_device_buf.GetDeviceBuffer()),
|
| 261 |
+
static_cast<B1DataType*>(b1_g_n_o_device_buf.GetDeviceBuffer()),
|
| 262 |
+
static_cast<CDataType*>(c_g_m_o_device_buf.GetDeviceBuffer()),
|
| 263 |
+
M,
|
| 264 |
+
N,
|
| 265 |
+
K,
|
| 266 |
+
O,
|
| 267 |
+
BatchCount,
|
| 268 |
+
StrideA,
|
| 269 |
+
StrideB0,
|
| 270 |
+
StrideB1,
|
| 271 |
+
StrideC,
|
| 272 |
+
BatchStrideA,
|
| 273 |
+
BatchStrideB0,
|
| 274 |
+
BatchStrideB1,
|
| 275 |
+
BatchStrideC,
|
| 276 |
+
a_element_op,
|
| 277 |
+
b0_element_op,
|
| 278 |
+
acc0_element_op,
|
| 279 |
+
b1_element_op,
|
| 280 |
+
c_element_op);
|
| 281 |
+
|
| 282 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 283 |
+
|
| 284 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 285 |
+
{
|
| 286 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 287 |
+
|
| 288 |
+
float ave_time =
|
| 289 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 290 |
+
|
| 291 |
+
std::size_t flop = (size_t(M) * N * K * 2 + size_t(M) * N * O * 2) * BatchCount;
|
| 292 |
+
std::size_t num_btype = (sizeof(ADataType) * M * K + sizeof(B0DataType) * K * N +
|
| 293 |
+
sizeof(B1DataType) * N * O + sizeof(CDataType) * M * O) *
|
| 294 |
+
BatchCount;
|
| 295 |
+
|
| 296 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 297 |
+
|
| 298 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 299 |
+
|
| 300 |
+
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 301 |
+
<< " GB/s, " << op_name << std::endl;
|
| 302 |
+
|
| 303 |
+
if(tflops > best_tflops)
|
| 304 |
+
{
|
| 305 |
+
best_op_name = op_name;
|
| 306 |
+
best_tflops = tflops;
|
| 307 |
+
best_ave_time = ave_time;
|
| 308 |
+
best_gb_per_sec = gb_per_sec;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
if(do_verification)
|
| 312 |
+
{
|
| 313 |
+
c_g_m_o_device_buf.FromDevice(c_g_m_o_device_result.mData.data());
|
| 314 |
+
|
| 315 |
+
pass = pass & ck::utils::check_err(c_g_m_o_device_result, c_g_m_o_host_result);
|
| 316 |
+
|
| 317 |
+
if(do_log)
|
| 318 |
+
{
|
| 319 |
+
LogRangeAsType<float>(std::cout << "a_g_m_k: ", a_g_m_k.mData, ",")
|
| 320 |
+
<< std::endl;
|
| 321 |
+
LogRangeAsType<float>(std::cout << "b0_g_k_n : ", b0_g_k_n.mData, ",")
|
| 322 |
+
<< std::endl;
|
| 323 |
+
LogRangeAsType<float>(std::cout << "b1_g_n_o : ", b1_g_n_o.mData, ",")
|
| 324 |
+
<< std::endl;
|
| 325 |
+
LogRangeAsType<float>(
|
| 326 |
+
std::cout << "c_g_m_o_host_result : ", c_g_m_o_host_result.mData, ",")
|
| 327 |
+
<< std::endl;
|
| 328 |
+
LogRangeAsType<float>(
|
| 329 |
+
std::cout << "c_g_m_o_device_result : ", c_g_m_o_device_result.mData, ",")
|
| 330 |
+
<< std::endl;
|
| 331 |
+
}
|
| 332 |
+
}
|
| 333 |
+
}
|
| 334 |
+
else
|
| 335 |
+
{
|
| 336 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 337 |
+
}
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 341 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 342 |
+
|
| 343 |
+
return pass;
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
} // namespace profiler
|
| 347 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batched_gemm_softmax_gemm_permute_impl.hpp
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <memory>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_batched_gemm_softmax_gemm_permute.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_softmax_gemm_permute.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
|
| 21 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_softmax.hpp"
|
| 22 |
+
|
| 23 |
+
namespace ck {
|
| 24 |
+
namespace profiler {
|
| 25 |
+
|
| 26 |
+
template <index_t NumDimG,
|
| 27 |
+
index_t NumDimM,
|
| 28 |
+
index_t NumDimN,
|
| 29 |
+
index_t NumDimK,
|
| 30 |
+
index_t NumDimO,
|
| 31 |
+
typename ADataType,
|
| 32 |
+
typename B0DataType,
|
| 33 |
+
typename B1DataType,
|
| 34 |
+
typename CDataType,
|
| 35 |
+
typename Acc0BiasesDataType,
|
| 36 |
+
typename Acc1BiasesDataType,
|
| 37 |
+
tensor_operation::device::MaskingSpecialization MaskingSpec>
|
| 38 |
+
bool profile_batched_gemm_softmax_gemm_permute_impl(bool do_verification,
|
| 39 |
+
int init_method,
|
| 40 |
+
bool do_log,
|
| 41 |
+
bool time_kernel,
|
| 42 |
+
int M,
|
| 43 |
+
int N,
|
| 44 |
+
int K,
|
| 45 |
+
int O,
|
| 46 |
+
int G0,
|
| 47 |
+
int G1,
|
| 48 |
+
float alpha = -1.f)
|
| 49 |
+
|
| 50 |
+
{
|
| 51 |
+
|
| 52 |
+
using PassThrough = tensor_operation::element_wise::PassThrough;
|
| 53 |
+
using Scale = tensor_operation::element_wise::Scale;
|
| 54 |
+
using AElementOp = PassThrough;
|
| 55 |
+
using B0ElementOp = PassThrough;
|
| 56 |
+
using Acc0ElementOp = Scale;
|
| 57 |
+
using B1ElementOp = PassThrough;
|
| 58 |
+
using CElementOp = PassThrough;
|
| 59 |
+
using AccDataType = float;
|
| 60 |
+
using tensor_operation::device::MaskingSpecialization;
|
| 61 |
+
|
| 62 |
+
// Ref Gemm0: various type in, fp32 out
|
| 63 |
+
using ReferenceGemm0Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 64 |
+
B0DataType,
|
| 65 |
+
AccDataType,
|
| 66 |
+
AccDataType,
|
| 67 |
+
AElementOp,
|
| 68 |
+
B0ElementOp,
|
| 69 |
+
Acc0ElementOp>;
|
| 70 |
+
|
| 71 |
+
// Ref Softmax: fp32 in, various type out
|
| 72 |
+
using ReferenceSoftmaxInstance =
|
| 73 |
+
tensor_operation::host::ReferenceSoftmax<AccDataType, ADataType, AccDataType>;
|
| 74 |
+
|
| 75 |
+
// Ref Gemm1: various type in, various type out
|
| 76 |
+
using ReferenceGemm1Instance = tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 77 |
+
B1DataType,
|
| 78 |
+
CDataType,
|
| 79 |
+
AccDataType,
|
| 80 |
+
AElementOp,
|
| 81 |
+
B1ElementOp,
|
| 82 |
+
CElementOp>;
|
| 83 |
+
|
| 84 |
+
bool pass = true;
|
| 85 |
+
|
| 86 |
+
// A layout [G0, M, G1, K]
|
| 87 |
+
std::vector<ck::index_t> a_gs_ms_ks_lengths{G0, G1, M, K};
|
| 88 |
+
std::vector<ck::index_t> a_gs_ms_ks_strides{M * G1 * K, K, G1 * K, 1};
|
| 89 |
+
|
| 90 |
+
// B0 layout [G0, N, G1, K]
|
| 91 |
+
std::vector<ck::index_t> b0_gs_ns_ks_lengths{G0, G1, N, K};
|
| 92 |
+
std::vector<ck::index_t> b0_gs_ns_ks_strides{N * G1 * K, K, G1 * K, 1};
|
| 93 |
+
|
| 94 |
+
// B1 layout [G0, N, G1, O]
|
| 95 |
+
std::vector<ck::index_t> b1_gs_os_ns_lengths{G0, G1, O, N};
|
| 96 |
+
std::vector<ck::index_t> b1_gs_os_ns_strides{N * G1 * O, O, 1, G1 * O};
|
| 97 |
+
|
| 98 |
+
// C layout [G0, M, G1, O]
|
| 99 |
+
std::vector<ck::index_t> c_gs_ms_os_lengths{G0, G1, M, O};
|
| 100 |
+
std::vector<ck::index_t> c_gs_ms_os_strides{M * G1 * O, O, G1 * O, 1};
|
| 101 |
+
|
| 102 |
+
const int BatchCount = G0 * G1;
|
| 103 |
+
|
| 104 |
+
Tensor<ADataType> a_gs_ms_ks(a_gs_ms_ks_lengths, a_gs_ms_ks_strides);
|
| 105 |
+
Tensor<B0DataType> b0_gs_ns_ks(b0_gs_ns_ks_lengths, b0_gs_ns_ks_strides);
|
| 106 |
+
Tensor<B1DataType> b1_gs_os_ns(b1_gs_os_ns_lengths, b1_gs_os_ns_strides);
|
| 107 |
+
Tensor<CDataType> c_gs_ms_os_host_result(c_gs_ms_os_lengths, c_gs_ms_os_strides);
|
| 108 |
+
Tensor<CDataType> c_gs_ms_os_device_result(c_gs_ms_os_lengths, c_gs_ms_os_strides);
|
| 109 |
+
|
| 110 |
+
std::cout << "a_gs_ms_ks: " << a_gs_ms_ks.mDesc << std::endl;
|
| 111 |
+
std::cout << "b0_gs_ns_ks: " << b0_gs_ns_ks.mDesc << std::endl;
|
| 112 |
+
std::cout << "b1_gs_os_ns: " << b1_gs_os_ns.mDesc << std::endl;
|
| 113 |
+
std::cout << "c_gs_ms_os: " << c_gs_ms_os_host_result.mDesc << std::endl;
|
| 114 |
+
|
| 115 |
+
std::srand(1); // work around test flakiness
|
| 116 |
+
switch(init_method)
|
| 117 |
+
{
|
| 118 |
+
case 0: break;
|
| 119 |
+
case 1:
|
| 120 |
+
// Still unsure whether this kind of deterministic floating point accurary issue is expected
|
| 121 |
+
// or not. May want to try exact same approach as the GPU kernel in the host reference
|
| 122 |
+
// GEMM+Softmax+GEMM function to see if the accuracy discrepancy goes away. Until then,
|
| 123 |
+
// shrink the input value range as it is less likely to produce errors of around ~1e-3.
|
| 124 |
+
// a_gs_ms_ks.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 125 |
+
// b0_gs_ns_ks.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-5, 5});
|
| 126 |
+
// b1_gs_os_ns.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-5, 5});
|
| 127 |
+
a_gs_ms_ks.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
| 128 |
+
b0_gs_ns_ks.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 2});
|
| 129 |
+
b1_gs_os_ns.GenerateTensorValue(GeneratorTensor_2<B1DataType>{-2, 2});
|
| 130 |
+
break;
|
| 131 |
+
case 2:
|
| 132 |
+
a_gs_ms_ks.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 133 |
+
b0_gs_ns_ks.GenerateTensorValue(GeneratorTensor_3<B0DataType>{0.0, 1.0});
|
| 134 |
+
b1_gs_os_ns.GenerateTensorValue(GeneratorTensor_3<B1DataType>{-0.5, 0.5});
|
| 135 |
+
break;
|
| 136 |
+
case 3:
|
| 137 |
+
a_gs_ms_ks.GenerateTensorValue(GeneratorTensor_2<ADataType>{-2, 2});
|
| 138 |
+
b0_gs_ns_ks.GenerateTensorValue(GeneratorTensor_Diagonal<B0DataType>{});
|
| 139 |
+
b1_gs_os_ns.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
|
| 140 |
+
break;
|
| 141 |
+
default:
|
| 142 |
+
a_gs_ms_ks.GenerateTensorValue(GeneratorTensor_1<ADataType>{1});
|
| 143 |
+
b0_gs_ns_ks.GenerateTensorValue(GeneratorTensor_Sequential<B0DataType, 1>{});
|
| 144 |
+
b1_gs_os_ns.GenerateTensorValue(GeneratorTensor_Diagonal<B1DataType>{});
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_gs_ms_ks.mDesc.GetElementSpaceSize());
|
| 148 |
+
DeviceMem b0_device_buf(sizeof(B0DataType) * b0_gs_ns_ks.mDesc.GetElementSpaceSize());
|
| 149 |
+
DeviceMem b1_device_buf(sizeof(B1DataType) * b1_gs_os_ns.mDesc.GetElementSpaceSize());
|
| 150 |
+
DeviceMem c_device_buf(sizeof(CDataType) *
|
| 151 |
+
c_gs_ms_os_device_result.mDesc.GetElementSpaceSize());
|
| 152 |
+
|
| 153 |
+
a_device_buf.ToDevice(a_gs_ms_ks.mData.data());
|
| 154 |
+
b0_device_buf.ToDevice(b0_gs_ns_ks.mData.data());
|
| 155 |
+
b1_device_buf.ToDevice(b1_gs_os_ns.mData.data());
|
| 156 |
+
|
| 157 |
+
if(alpha < 0)
|
| 158 |
+
{
|
| 159 |
+
alpha = 1.f / std::sqrt(K); // usually 1 / sqrt(head_dim)
|
| 160 |
+
}
|
| 161 |
+
auto a_element_op = AElementOp{};
|
| 162 |
+
auto b0_element_op = B0ElementOp{};
|
| 163 |
+
auto acc0_element_op = Acc0ElementOp{alpha};
|
| 164 |
+
auto b1_element_op = B1ElementOp{};
|
| 165 |
+
auto c_element_op = CElementOp{};
|
| 166 |
+
|
| 167 |
+
using DeviceOp = tensor_operation::device::DeviceBatchedGemmSoftmaxGemmPermute<2,
|
| 168 |
+
1,
|
| 169 |
+
1,
|
| 170 |
+
1,
|
| 171 |
+
1,
|
| 172 |
+
ADataType,
|
| 173 |
+
B0DataType,
|
| 174 |
+
B1DataType,
|
| 175 |
+
CDataType,
|
| 176 |
+
ck::Tuple<>,
|
| 177 |
+
ck::Tuple<>,
|
| 178 |
+
AElementOp,
|
| 179 |
+
B0ElementOp,
|
| 180 |
+
Acc0ElementOp,
|
| 181 |
+
B1ElementOp,
|
| 182 |
+
CElementOp,
|
| 183 |
+
MaskingSpec>;
|
| 184 |
+
|
| 185 |
+
// get device op instances
|
| 186 |
+
const auto op_ptrs = tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 187 |
+
DeviceOp>::GetInstances();
|
| 188 |
+
|
| 189 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 190 |
+
|
| 191 |
+
if(do_verification)
|
| 192 |
+
{
|
| 193 |
+
c_device_buf.FromDevice(c_gs_ms_os_device_result.mData.data());
|
| 194 |
+
|
| 195 |
+
Tensor<ADataType> a_g_m_k({BatchCount, M, K});
|
| 196 |
+
Tensor<B0DataType> b0_g_k_n({BatchCount, K, N});
|
| 197 |
+
Tensor<B1DataType> b1_g_n_o({BatchCount, N, O});
|
| 198 |
+
Tensor<AccDataType> acc0_g_m_n({BatchCount, M, N}); // scratch object after gemm0
|
| 199 |
+
Tensor<ADataType> a1_g_m_n({BatchCount, M, N}); // scratch object after softmax
|
| 200 |
+
Tensor<CDataType> c_g_m_o_host_result({BatchCount, M, O}); // scratch object after gemm1
|
| 201 |
+
|
| 202 |
+
// permute
|
| 203 |
+
a_gs_ms_ks.ForEach([&](auto& self, auto idx) {
|
| 204 |
+
a_g_m_k(idx[0] * G1 + idx[1], idx[2], idx[3]) = self(idx);
|
| 205 |
+
});
|
| 206 |
+
b0_gs_ns_ks.ForEach([&](auto& self, auto idx) {
|
| 207 |
+
b0_g_k_n(idx[0] * G1 + idx[1], idx[3], idx[2]) = self(idx);
|
| 208 |
+
});
|
| 209 |
+
b1_gs_os_ns.ForEach([&](auto& self, auto idx) {
|
| 210 |
+
b1_g_n_o(idx[0] * G1 + idx[1], idx[3], idx[2]) = self(idx);
|
| 211 |
+
});
|
| 212 |
+
|
| 213 |
+
auto ref_gemm0 = ReferenceGemm0Instance{};
|
| 214 |
+
auto ref_gemm0_invoker = ref_gemm0.MakeInvoker();
|
| 215 |
+
auto ref_gemm0_argument = ref_gemm0.MakeArgument(
|
| 216 |
+
a_g_m_k, b0_g_k_n, acc0_g_m_n, a_element_op, b0_element_op, Scale{alpha});
|
| 217 |
+
|
| 218 |
+
ref_gemm0_invoker.Run(ref_gemm0_argument);
|
| 219 |
+
|
| 220 |
+
// mask out upper triangle
|
| 221 |
+
acc0_g_m_n.ForEach([&](auto& self, auto idx) {
|
| 222 |
+
if(MaskingSpec == MaskingSpecialization::MaskOutUpperTriangle && idx[1] < idx[2])
|
| 223 |
+
self(idx) = -ck::NumericLimits<float>::Infinity();
|
| 224 |
+
});
|
| 225 |
+
|
| 226 |
+
auto ref_softmax = ReferenceSoftmaxInstance{};
|
| 227 |
+
auto ref_softmax_invoker = ref_softmax.MakeInvoker();
|
| 228 |
+
auto ref_softmax_argument = ref_softmax.MakeArgument(acc0_g_m_n, a1_g_m_n, 1, 0, {2});
|
| 229 |
+
|
| 230 |
+
ref_softmax_invoker.Run(ref_softmax_argument);
|
| 231 |
+
|
| 232 |
+
auto ref_gemm1 = ReferenceGemm1Instance{};
|
| 233 |
+
auto ref_gemm1_invoker = ref_gemm1.MakeInvoker();
|
| 234 |
+
auto ref_gemm1_argument = ref_gemm1.MakeArgument(
|
| 235 |
+
a1_g_m_n, b1_g_n_o, c_g_m_o_host_result, PassThrough{}, b1_element_op, c_element_op);
|
| 236 |
+
|
| 237 |
+
ref_gemm1_invoker.Run(ref_gemm1_argument);
|
| 238 |
+
|
| 239 |
+
// permute
|
| 240 |
+
c_gs_ms_os_host_result.ForEach([&](auto& self, auto idx) {
|
| 241 |
+
const size_t& g0 = idx[0];
|
| 242 |
+
const size_t& g1 = idx[1];
|
| 243 |
+
|
| 244 |
+
const size_t g = g0 * G1 + g1;
|
| 245 |
+
|
| 246 |
+
self(idx) = c_g_m_o_host_result(g, idx[2], idx[3]);
|
| 247 |
+
});
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
std::string best_op_name;
|
| 251 |
+
float best_ave_time = 0;
|
| 252 |
+
float best_tflops = 0;
|
| 253 |
+
float best_gb_per_sec = 0;
|
| 254 |
+
|
| 255 |
+
// profile device op instances
|
| 256 |
+
for(auto& op_ptr : op_ptrs)
|
| 257 |
+
{
|
| 258 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 259 |
+
static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 260 |
+
static_cast<B0DataType*>(b0_device_buf.GetDeviceBuffer()),
|
| 261 |
+
static_cast<B1DataType*>(b1_device_buf.GetDeviceBuffer()),
|
| 262 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 263 |
+
{}, // std::array<void*, 1> p_acc0_biases;
|
| 264 |
+
{}, // std::array<void*, 1> p_acc1_biases;
|
| 265 |
+
a_gs_ms_ks_lengths,
|
| 266 |
+
a_gs_ms_ks_strides,
|
| 267 |
+
b0_gs_ns_ks_lengths,
|
| 268 |
+
b0_gs_ns_ks_strides,
|
| 269 |
+
b1_gs_os_ns_lengths,
|
| 270 |
+
b1_gs_os_ns_strides,
|
| 271 |
+
c_gs_ms_os_lengths,
|
| 272 |
+
c_gs_ms_os_strides,
|
| 273 |
+
{}, // std::array<std::vector<ck::index_t>, 1>{acc0_biases_gs_ms_ns_lengths},
|
| 274 |
+
{}, // std::array<std::vector<ck::index_t>, 1>{acc0_biases_gs_ms_ns_strides},
|
| 275 |
+
{}, // std::array<std::vector<ck::index_t>, 1>{acc1_biases_gs_ms_os_lengths},
|
| 276 |
+
{}, // std::array<std::vector<ck::index_t>, 1>{acc1_biases_gs_ms_os_strides},
|
| 277 |
+
a_element_op,
|
| 278 |
+
b0_element_op,
|
| 279 |
+
acc0_element_op,
|
| 280 |
+
b1_element_op,
|
| 281 |
+
c_element_op);
|
| 282 |
+
|
| 283 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 284 |
+
|
| 285 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 286 |
+
{
|
| 287 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 288 |
+
|
| 289 |
+
float ave_time =
|
| 290 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 291 |
+
|
| 292 |
+
std::size_t flop = (size_t(M) * N * K * 2 + size_t(M) * N * O * 2) * BatchCount;
|
| 293 |
+
std::size_t num_btype = (sizeof(ADataType) * M * K + sizeof(B0DataType) * K * N +
|
| 294 |
+
sizeof(B1DataType) * N * O + sizeof(CDataType) * M * O) *
|
| 295 |
+
BatchCount;
|
| 296 |
+
|
| 297 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 298 |
+
|
| 299 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 300 |
+
|
| 301 |
+
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 302 |
+
<< " GB/s, " << op_name << std::endl;
|
| 303 |
+
|
| 304 |
+
if(tflops > best_tflops)
|
| 305 |
+
{
|
| 306 |
+
best_op_name = op_name;
|
| 307 |
+
best_tflops = tflops;
|
| 308 |
+
best_ave_time = ave_time;
|
| 309 |
+
best_gb_per_sec = gb_per_sec;
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
if(do_verification)
|
| 313 |
+
{
|
| 314 |
+
c_device_buf.FromDevice(c_gs_ms_os_device_result.mData.data());
|
| 315 |
+
|
| 316 |
+
// default absolute error and relative error is 0.001
|
| 317 |
+
double rtol = 1e-3;
|
| 318 |
+
double atol = 1e-3;
|
| 319 |
+
|
| 320 |
+
// when BF16 is taken, set absolute error and relative error to 0.01
|
| 321 |
+
if(std::is_same_v<ADataType, ck::bhalf_t> &&
|
| 322 |
+
std::is_same_v<B0DataType, ck::bhalf_t> &&
|
| 323 |
+
std::is_same_v<B1DataType, ck::bhalf_t> &&
|
| 324 |
+
std::is_same_v<CDataType, ck::bhalf_t>)
|
| 325 |
+
{
|
| 326 |
+
rtol = 1e-2;
|
| 327 |
+
atol = 1e-2;
|
| 328 |
+
}
|
| 329 |
+
|
| 330 |
+
pass = pass & ck::utils::check_err(c_gs_ms_os_device_result,
|
| 331 |
+
c_gs_ms_os_host_result,
|
| 332 |
+
"Error: Incorrect results!",
|
| 333 |
+
rtol,
|
| 334 |
+
atol);
|
| 335 |
+
|
| 336 |
+
if(do_log)
|
| 337 |
+
{
|
| 338 |
+
LogRangeAsType<float>(std::cout << "a_gs_ms_ks: ", a_gs_ms_ks.mData, ",")
|
| 339 |
+
<< std::endl;
|
| 340 |
+
LogRangeAsType<float>(std::cout << "b0_gs_ns_ks : ", b0_gs_ns_ks.mData, ",")
|
| 341 |
+
<< std::endl;
|
| 342 |
+
LogRangeAsType<float>(std::cout << "b1_gs_os_ns : ", b1_gs_os_ns.mData, ",")
|
| 343 |
+
<< std::endl;
|
| 344 |
+
LogRangeAsType<float>(
|
| 345 |
+
std::cout << "c_gs_ms_os_host_result : ", c_gs_ms_os_host_result.mData, ",")
|
| 346 |
+
<< std::endl;
|
| 347 |
+
LogRangeAsType<float>(std::cout << "c_gs_ms_os_device_result : ",
|
| 348 |
+
c_gs_ms_os_device_result.mData,
|
| 349 |
+
",")
|
| 350 |
+
<< std::endl;
|
| 351 |
+
}
|
| 352 |
+
}
|
| 353 |
+
}
|
| 354 |
+
else
|
| 355 |
+
{
|
| 356 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 357 |
+
}
|
| 358 |
+
}
|
| 359 |
+
|
| 360 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 361 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 362 |
+
|
| 363 |
+
return pass;
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
} // namespace profiler
|
| 367 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batchnorm_backward_impl.hpp
ADDED
|
@@ -0,0 +1,390 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <stdexcept>
|
| 8 |
+
|
| 9 |
+
#include "ck/ck.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 11 |
+
#include "ck/library/utility/check_err.hpp"
|
| 12 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 13 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 14 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/batchnorm_backward.hpp"
|
| 16 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_batchnorm_backward.hpp"
|
| 17 |
+
|
| 18 |
+
namespace ck {
|
| 19 |
+
namespace profiler {
|
| 20 |
+
|
| 21 |
+
template <typename XDataType,
|
| 22 |
+
typename DxDataType,
|
| 23 |
+
typename DyDataType,
|
| 24 |
+
typename AccDataType,
|
| 25 |
+
typename ScaleDataType,
|
| 26 |
+
typename DscaleDbiasDataType,
|
| 27 |
+
typename MeanVarDataType,
|
| 28 |
+
index_t Rank,
|
| 29 |
+
index_t NumBatchNormReduceDim>
|
| 30 |
+
bool profile_batchnorm_backward_impl(bool do_verification,
|
| 31 |
+
int init_method,
|
| 32 |
+
bool do_dumpout,
|
| 33 |
+
bool time_kernel,
|
| 34 |
+
const std::vector<size_t> inOutLengths,
|
| 35 |
+
const std::vector<int> reduceDims,
|
| 36 |
+
bool haveSavedMeanInvVar,
|
| 37 |
+
double epsilon)
|
| 38 |
+
{
|
| 39 |
+
if(inOutLengths.size() != Rank || reduceDims.size() != NumBatchNormReduceDim)
|
| 40 |
+
{
|
| 41 |
+
throw std::runtime_error("Invalid tensor lengths or number of reduce dimensions!");
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
std::vector<size_t> scaleBiasMeanVarLengths;
|
| 45 |
+
|
| 46 |
+
// used for calculating the effective transferred bytes by each operation
|
| 47 |
+
size_t total_length;
|
| 48 |
+
size_t invariant_length = 1;
|
| 49 |
+
|
| 50 |
+
total_length =
|
| 51 |
+
std::accumulate(inOutLengths.begin(), inOutLengths.end(), 1, std::multiplies<size_t>{});
|
| 52 |
+
|
| 53 |
+
if(std::any_of(reduceDims.begin(), reduceDims.end(), [](int d) { return d < 0 || d >= Rank; }))
|
| 54 |
+
throw std::runtime_error("Invalid reduce dimensions!");
|
| 55 |
+
|
| 56 |
+
for(int dim = 0; dim < Rank; dim++)
|
| 57 |
+
{
|
| 58 |
+
if(std::none_of(reduceDims.begin(), reduceDims.end(), [&](int d) { return dim == d; }))
|
| 59 |
+
{
|
| 60 |
+
scaleBiasMeanVarLengths.push_back(inOutLengths[dim]);
|
| 61 |
+
invariant_length *= inOutLengths[dim];
|
| 62 |
+
};
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
// input data of the batchnorm backward algorithm
|
| 66 |
+
Tensor<XDataType> x(inOutLengths);
|
| 67 |
+
Tensor<DyDataType> dy(inOutLengths);
|
| 68 |
+
Tensor<ScaleDataType> bnScale(scaleBiasMeanVarLengths);
|
| 69 |
+
|
| 70 |
+
Tensor<MeanVarDataType> savedMean(scaleBiasMeanVarLengths);
|
| 71 |
+
Tensor<MeanVarDataType> savedInvVar(scaleBiasMeanVarLengths);
|
| 72 |
+
// savedVariance is only used for initializing savedInvVar
|
| 73 |
+
Tensor<MeanVarDataType> savedVariance(scaleBiasMeanVarLengths);
|
| 74 |
+
|
| 75 |
+
// output data of the batchnorm backward algorithm
|
| 76 |
+
Tensor<DxDataType> dx_ref(inOutLengths);
|
| 77 |
+
Tensor<DxDataType> dx(inOutLengths);
|
| 78 |
+
|
| 79 |
+
Tensor<DscaleDbiasDataType> dscale(scaleBiasMeanVarLengths);
|
| 80 |
+
Tensor<DscaleDbiasDataType> dbias(scaleBiasMeanVarLengths);
|
| 81 |
+
|
| 82 |
+
Tensor<DscaleDbiasDataType> dscale_ref(scaleBiasMeanVarLengths);
|
| 83 |
+
Tensor<DscaleDbiasDataType> dbias_ref(scaleBiasMeanVarLengths);
|
| 84 |
+
|
| 85 |
+
auto inOutStrides = x.mDesc.GetStrides();
|
| 86 |
+
auto scaleBiasMeanVarStrides = bnScale.mDesc.GetStrides();
|
| 87 |
+
|
| 88 |
+
std::size_t num_thread = std::thread::hardware_concurrency();
|
| 89 |
+
|
| 90 |
+
if(haveSavedMeanInvVar)
|
| 91 |
+
{
|
| 92 |
+
const float x_mean = 0.0f;
|
| 93 |
+
const float x_stddev = 1.0f;
|
| 94 |
+
const float noise_stddev = 0.0001f;
|
| 95 |
+
|
| 96 |
+
// input data in normal distribution
|
| 97 |
+
x.GenerateTensorValue(GeneratorTensor_4<XDataType>{x_mean, x_stddev}, num_thread);
|
| 98 |
+
|
| 99 |
+
// initialize the savedMean to be values with tiny variation to the mean of the x values
|
| 100 |
+
savedMean.GenerateTensorValue(GeneratorTensor_4<MeanVarDataType>{x_mean, noise_stddev},
|
| 101 |
+
num_thread);
|
| 102 |
+
|
| 103 |
+
// initialize the variance to be values with tiny variation to the variance of the x values
|
| 104 |
+
savedVariance.GenerateTensorValue(
|
| 105 |
+
GeneratorTensor_4<MeanVarDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
|
| 106 |
+
|
| 107 |
+
auto it_src = savedVariance.mData.begin();
|
| 108 |
+
auto it_dst = savedInvVar.mData.begin();
|
| 109 |
+
float tmp_epsilon = std::numeric_limits<float>::epsilon();
|
| 110 |
+
|
| 111 |
+
while(it_src != savedVariance.mData.end())
|
| 112 |
+
{
|
| 113 |
+
*it_dst = type_convert<AccDataType>(
|
| 114 |
+
1.0f / std::sqrtf(type_convert<float>(*it_src) + tmp_epsilon));
|
| 115 |
+
|
| 116 |
+
it_src++;
|
| 117 |
+
it_dst++;
|
| 118 |
+
};
|
| 119 |
+
}
|
| 120 |
+
else
|
| 121 |
+
{
|
| 122 |
+
const float x_mean = 0.0f;
|
| 123 |
+
const float x_stddev = 1.0f;
|
| 124 |
+
|
| 125 |
+
// input data in normal distribution
|
| 126 |
+
x.GenerateTensorValue(GeneratorTensor_4<XDataType>{x_mean, x_stddev}, num_thread);
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
if(do_verification)
|
| 130 |
+
{
|
| 131 |
+
switch(init_method)
|
| 132 |
+
{
|
| 133 |
+
case 0:
|
| 134 |
+
dy.GenerateTensorValue(GeneratorTensor_0<DyDataType>{}, num_thread);
|
| 135 |
+
bnScale.GenerateTensorValue(GeneratorTensor_0<ScaleDataType>{}, num_thread);
|
| 136 |
+
break;
|
| 137 |
+
case 1:
|
| 138 |
+
dy.GenerateTensorValue(GeneratorTensor_1<DyDataType>{1}, num_thread);
|
| 139 |
+
bnScale.GenerateTensorValue(GeneratorTensor_1<ScaleDataType>{1}, num_thread);
|
| 140 |
+
break;
|
| 141 |
+
case 2:
|
| 142 |
+
dy.GenerateTensorValue(GeneratorTensor_2<DyDataType>{-2, 2}, num_thread);
|
| 143 |
+
bnScale.GenerateTensorValue(GeneratorTensor_2<ScaleDataType>{-5, 5}, num_thread);
|
| 144 |
+
break;
|
| 145 |
+
default:
|
| 146 |
+
dy.GenerateTensorValue(GeneratorTensor_3<DyDataType>{-0.2f, 0.2f}, num_thread);
|
| 147 |
+
bnScale.GenerateTensorValue(GeneratorTensor_3<ScaleDataType>{-0.5f, 0.5f}, num_thread);
|
| 148 |
+
}
|
| 149 |
+
};
|
| 150 |
+
|
| 151 |
+
// input data of the batchnorm backward algorithm
|
| 152 |
+
DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
|
| 153 |
+
DeviceMem dy_dev(sizeof(DyDataType) * dy.mDesc.GetElementSpaceSize());
|
| 154 |
+
|
| 155 |
+
DeviceMem bnScale_dev(sizeof(ScaleDataType) * bnScale.mDesc.GetElementSpaceSize());
|
| 156 |
+
|
| 157 |
+
DeviceMem savedMean_dev(sizeof(MeanVarDataType) * savedMean.mDesc.GetElementSpaceSize());
|
| 158 |
+
DeviceMem savedInvVar_dev(sizeof(MeanVarDataType) * savedInvVar.mDesc.GetElementSpaceSize());
|
| 159 |
+
|
| 160 |
+
// output data of the batchnorm backward algorithm
|
| 161 |
+
DeviceMem dx_dev(sizeof(DxDataType) * dx.mDesc.GetElementSpaceSize());
|
| 162 |
+
|
| 163 |
+
DeviceMem dscale_dev(sizeof(DscaleDbiasDataType) * dscale.mDesc.GetElementSpaceSize());
|
| 164 |
+
DeviceMem dbias_dev(sizeof(DscaleDbiasDataType) * dbias.mDesc.GetElementSpaceSize());
|
| 165 |
+
|
| 166 |
+
x_dev.ToDevice(x.mData.data());
|
| 167 |
+
dy_dev.ToDevice(dy.mData.data());
|
| 168 |
+
bnScale_dev.ToDevice(bnScale.mData.data());
|
| 169 |
+
|
| 170 |
+
if(haveSavedMeanInvVar)
|
| 171 |
+
{
|
| 172 |
+
savedMean_dev.ToDevice(savedMean.mData.data());
|
| 173 |
+
savedInvVar_dev.ToDevice(savedInvVar.mData.data());
|
| 174 |
+
};
|
| 175 |
+
|
| 176 |
+
std::array<index_t, Rank> arrInOutLengths;
|
| 177 |
+
std::array<index_t, Rank> arrInOutStrides;
|
| 178 |
+
std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarLengths;
|
| 179 |
+
std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarStrides;
|
| 180 |
+
std::array<int, NumBatchNormReduceDim> arrReduceDims;
|
| 181 |
+
|
| 182 |
+
std::copy(inOutLengths.begin(), inOutLengths.end(), arrInOutLengths.begin());
|
| 183 |
+
std::copy(inOutStrides.begin(), inOutStrides.end(), arrInOutStrides.begin());
|
| 184 |
+
std::copy(scaleBiasMeanVarLengths.begin(),
|
| 185 |
+
scaleBiasMeanVarLengths.end(),
|
| 186 |
+
arrScaleBiasMeanVarLengths.begin());
|
| 187 |
+
std::copy(scaleBiasMeanVarStrides.begin(),
|
| 188 |
+
scaleBiasMeanVarStrides.end(),
|
| 189 |
+
arrScaleBiasMeanVarStrides.begin());
|
| 190 |
+
|
| 191 |
+
std::copy(reduceDims.begin(), reduceDims.end(), arrReduceDims.begin());
|
| 192 |
+
|
| 193 |
+
using PassThroughOp = ck::tensor_operation::element_wise::PassThrough;
|
| 194 |
+
|
| 195 |
+
// add device batchnorm-backward instances
|
| 196 |
+
using DeviceOp = ck::tensor_operation::device::DeviceBatchNormBwd<XDataType,
|
| 197 |
+
DxDataType,
|
| 198 |
+
DxDataType,
|
| 199 |
+
AccDataType,
|
| 200 |
+
ScaleDataType,
|
| 201 |
+
DscaleDbiasDataType,
|
| 202 |
+
MeanVarDataType,
|
| 203 |
+
PassThroughOp,
|
| 204 |
+
Rank,
|
| 205 |
+
NumBatchNormReduceDim>;
|
| 206 |
+
|
| 207 |
+
// get device op instances
|
| 208 |
+
const auto instance_ptrs =
|
| 209 |
+
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 210 |
+
DeviceOp>::GetInstances();
|
| 211 |
+
|
| 212 |
+
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
|
| 213 |
+
|
| 214 |
+
std::string best_instance_name;
|
| 215 |
+
float best_avg_time = std::numeric_limits<float>::max();
|
| 216 |
+
float best_gb_per_sec = 0;
|
| 217 |
+
|
| 218 |
+
if(do_verification)
|
| 219 |
+
{
|
| 220 |
+
using ReferenceBatchNormBwdInstance =
|
| 221 |
+
ck::tensor_operation::host::ReferenceBatchNormBwd<XDataType,
|
| 222 |
+
DxDataType,
|
| 223 |
+
DyDataType,
|
| 224 |
+
AccDataType,
|
| 225 |
+
ScaleDataType,
|
| 226 |
+
DscaleDbiasDataType,
|
| 227 |
+
MeanVarDataType,
|
| 228 |
+
PassThroughOp,
|
| 229 |
+
Rank,
|
| 230 |
+
NumBatchNormReduceDim>;
|
| 231 |
+
|
| 232 |
+
auto batchNormBwd_ref = ReferenceBatchNormBwdInstance{};
|
| 233 |
+
|
| 234 |
+
auto argument_ptr_ref = batchNormBwd_ref.MakeArgumentPointer(
|
| 235 |
+
arrInOutLengths,
|
| 236 |
+
arrInOutStrides,
|
| 237 |
+
arrInOutStrides,
|
| 238 |
+
arrInOutStrides,
|
| 239 |
+
arrReduceDims,
|
| 240 |
+
arrScaleBiasMeanVarLengths,
|
| 241 |
+
arrScaleBiasMeanVarStrides,
|
| 242 |
+
arrScaleBiasMeanVarStrides,
|
| 243 |
+
arrScaleBiasMeanVarStrides,
|
| 244 |
+
x.mData.data(),
|
| 245 |
+
dy.mData.data(),
|
| 246 |
+
bnScale.mData.data(),
|
| 247 |
+
haveSavedMeanInvVar ? savedMean.mData.data() : nullptr,
|
| 248 |
+
haveSavedMeanInvVar ? savedInvVar.mData.data() : nullptr,
|
| 249 |
+
epsilon,
|
| 250 |
+
PassThroughOp{},
|
| 251 |
+
dx_ref.mData.data(),
|
| 252 |
+
dscale_ref.mData.data(),
|
| 253 |
+
dbias_ref.mData.data());
|
| 254 |
+
|
| 255 |
+
if(!batchNormBwd_ref.IsSupportedArgument(argument_ptr_ref.get()))
|
| 256 |
+
{
|
| 257 |
+
std::cout << "The runtime parameters not supported by the reference instance, exiting!"
|
| 258 |
+
<< std::endl;
|
| 259 |
+
return (false);
|
| 260 |
+
};
|
| 261 |
+
|
| 262 |
+
auto invoker_ptr_ref = batchNormBwd_ref.MakeInvokerPointer();
|
| 263 |
+
|
| 264 |
+
(void)invoker_ptr_ref->Run(argument_ptr_ref.get());
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
int num_kernel = 0;
|
| 268 |
+
bool pass = true;
|
| 269 |
+
|
| 270 |
+
for(auto& inst_ptr : instance_ptrs)
|
| 271 |
+
{
|
| 272 |
+
auto argument_ptr = inst_ptr->MakeArgumentPointer(
|
| 273 |
+
arrInOutLengths,
|
| 274 |
+
arrInOutStrides,
|
| 275 |
+
arrInOutStrides,
|
| 276 |
+
arrInOutStrides,
|
| 277 |
+
arrReduceDims,
|
| 278 |
+
arrScaleBiasMeanVarLengths,
|
| 279 |
+
arrScaleBiasMeanVarStrides,
|
| 280 |
+
arrScaleBiasMeanVarStrides,
|
| 281 |
+
arrScaleBiasMeanVarStrides,
|
| 282 |
+
x_dev.GetDeviceBuffer(),
|
| 283 |
+
dy_dev.GetDeviceBuffer(),
|
| 284 |
+
bnScale_dev.GetDeviceBuffer(),
|
| 285 |
+
haveSavedMeanInvVar ? savedMean_dev.GetDeviceBuffer() : nullptr,
|
| 286 |
+
haveSavedMeanInvVar ? savedInvVar_dev.GetDeviceBuffer() : nullptr,
|
| 287 |
+
epsilon,
|
| 288 |
+
PassThroughOp{},
|
| 289 |
+
dx_dev.GetDeviceBuffer(),
|
| 290 |
+
dscale_dev.GetDeviceBuffer(),
|
| 291 |
+
dbias_dev.GetDeviceBuffer());
|
| 292 |
+
|
| 293 |
+
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 294 |
+
{
|
| 295 |
+
num_kernel++;
|
| 296 |
+
}
|
| 297 |
+
else
|
| 298 |
+
{
|
| 299 |
+
if(time_kernel)
|
| 300 |
+
{
|
| 301 |
+
std::cout << inst_ptr->GetTypeString()
|
| 302 |
+
<< " skipped due to unsupported argument: " << std::endl;
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
continue;
|
| 306 |
+
};
|
| 307 |
+
|
| 308 |
+
size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
|
| 309 |
+
|
| 310 |
+
DeviceMem workspace_dev(workspace_sz);
|
| 311 |
+
|
| 312 |
+
inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());
|
| 313 |
+
|
| 314 |
+
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
|
| 315 |
+
|
| 316 |
+
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 317 |
+
|
| 318 |
+
size_t num_bytes = 0;
|
| 319 |
+
|
| 320 |
+
// inputing of x, dy, scale, outputing of dx, dscale, dbias
|
| 321 |
+
num_bytes += total_length * (sizeof(XDataType) + sizeof(DyDataType) + sizeof(DxDataType)) +
|
| 322 |
+
invariant_length * sizeof(DscaleDbiasDataType) * 2;
|
| 323 |
+
|
| 324 |
+
// inputting of savedMean, savedInvVariance
|
| 325 |
+
if(haveSavedMeanInvVar)
|
| 326 |
+
num_bytes += invariant_length * sizeof(MeanVarDataType) * 2;
|
| 327 |
+
|
| 328 |
+
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
| 329 |
+
|
| 330 |
+
if(time_kernel)
|
| 331 |
+
std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, "
|
| 332 |
+
<< inst_ptr->GetTypeString() << std::endl;
|
| 333 |
+
|
| 334 |
+
if(avg_time < best_avg_time)
|
| 335 |
+
{
|
| 336 |
+
best_instance_name = inst_ptr->GetTypeString();
|
| 337 |
+
best_avg_time = avg_time;
|
| 338 |
+
best_gb_per_sec = gb_per_sec;
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
if(do_verification)
|
| 342 |
+
{
|
| 343 |
+
using ck::utils::check_err;
|
| 344 |
+
bool single_pass = true;
|
| 345 |
+
|
| 346 |
+
dx_dev.FromDevice(dx.mData.data());
|
| 347 |
+
dscale_dev.FromDevice(dscale.data());
|
| 348 |
+
dbias_dev.FromDevice(dbias.data());
|
| 349 |
+
|
| 350 |
+
// clang-format off
|
| 351 |
+
single_pass = single_pass && ck::utils::check_err(dx.mData, dx_ref.mData, "dx result:", 5e-4, 5e-4);
|
| 352 |
+
single_pass = single_pass && ck::utils::check_err(dscale.mData, dscale_ref.mData, "dScale result:", 3e-3, 3e-3);
|
| 353 |
+
single_pass = single_pass && ck::utils::check_err(dbias.mData, dbias_ref.mData, "dBias result:", 3e-3, 3e-3);
|
| 354 |
+
// clang-format on
|
| 355 |
+
|
| 356 |
+
pass = pass && single_pass;
|
| 357 |
+
};
|
| 358 |
+
|
| 359 |
+
if(do_dumpout)
|
| 360 |
+
{
|
| 361 |
+
using ck::host_common::dumpBufferToFile;
|
| 362 |
+
|
| 363 |
+
// clang-format off
|
| 364 |
+
dumpBufferToFile("dump_x.bin", x.mData.data(), x.mDesc.GetElementSize());
|
| 365 |
+
dumpBufferToFile("dump_dy.bin", dy.mData.data(), dy.mDesc.GetElementSize());
|
| 366 |
+
dumpBufferToFile("dump_dx.bin", dx.mData.data(), dx.mDesc.GetElementSize());
|
| 367 |
+
dumpBufferToFile("dump_dx_ref.bin", dx_ref.mData.data(), dx_ref.mDesc.GetElementSize());
|
| 368 |
+
dumpBufferToFile("dump_dscale.bin", dscale.mData.data(), dscale.mDesc.GetElementSize());
|
| 369 |
+
dumpBufferToFile("dump_dscale_ref.bin", dscale_ref.mData.data(), dscale_ref.mDesc.GetElementSize());
|
| 370 |
+
// clang-format off
|
| 371 |
+
};
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
if(time_kernel)
|
| 375 |
+
{
|
| 376 |
+
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
|
| 377 |
+
<< best_instance_name << std::endl;
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
if(num_kernel == 0)
|
| 381 |
+
{
|
| 382 |
+
std::cout << "Error: No kernel is applicable" << std::endl;
|
| 383 |
+
return false;
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
return pass;
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
} // namespace profiler
|
| 390 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_batchnorm_forward_impl.hpp
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <stdexcept>
|
| 8 |
+
|
| 9 |
+
#include "ck/ck.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 11 |
+
#include "ck/library/utility/check_err.hpp"
|
| 12 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 13 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 14 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/batchnorm_forward.hpp"
|
| 16 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_batchnorm_forward.hpp"
|
| 17 |
+
|
| 18 |
+
namespace ck {
|
| 19 |
+
namespace profiler {
|
| 20 |
+
|
| 21 |
+
template <typename XDataType,
|
| 22 |
+
typename YDataType,
|
| 23 |
+
typename AccDataType,
|
| 24 |
+
typename ScaleDataType,
|
| 25 |
+
typename BiasDataType,
|
| 26 |
+
typename MeanVarDataType,
|
| 27 |
+
index_t Rank,
|
| 28 |
+
index_t NumBatchNormReduceDim>
|
| 29 |
+
bool profile_batchnorm_forward_impl(int do_verification,
|
| 30 |
+
int init_method,
|
| 31 |
+
bool do_dumpout,
|
| 32 |
+
bool time_kernel,
|
| 33 |
+
const std::vector<size_t> inOutLengths,
|
| 34 |
+
const std::vector<int> reduceDims,
|
| 35 |
+
bool updateMovingAverage,
|
| 36 |
+
bool saveMeanAndInvVariance,
|
| 37 |
+
double averageFactor,
|
| 38 |
+
double epsilon)
|
| 39 |
+
{
|
| 40 |
+
if(inOutLengths.size() != Rank || reduceDims.size() != NumBatchNormReduceDim)
|
| 41 |
+
{
|
| 42 |
+
throw std::runtime_error("Invalid tensor lengths or number of reduce dimensions!");
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
std::vector<size_t> scaleBiasMeanVarLengths;
|
| 46 |
+
|
| 47 |
+
// used for calculating the effective transferred bytes by each operation
|
| 48 |
+
size_t total_length;
|
| 49 |
+
size_t invariant_length = 1;
|
| 50 |
+
|
| 51 |
+
total_length =
|
| 52 |
+
std::accumulate(inOutLengths.begin(), inOutLengths.end(), 1, std::multiplies<size_t>{});
|
| 53 |
+
|
| 54 |
+
if(std::any_of(reduceDims.begin(), reduceDims.end(), [](int d) { return d < 0 || d >= Rank; }))
|
| 55 |
+
throw std::runtime_error("Invalid reduce dimensions!");
|
| 56 |
+
|
| 57 |
+
for(int dim = 0; dim < Rank; dim++)
|
| 58 |
+
{
|
| 59 |
+
if(std::none_of(reduceDims.begin(), reduceDims.end(), [&](int d) { return dim == d; }))
|
| 60 |
+
{
|
| 61 |
+
scaleBiasMeanVarLengths.push_back(inOutLengths[dim]);
|
| 62 |
+
invariant_length *= inOutLengths[dim];
|
| 63 |
+
};
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
// input data of the batchnorm forward algorithm
|
| 67 |
+
Tensor<XDataType> x(inOutLengths);
|
| 68 |
+
Tensor<ScaleDataType> bnScale(scaleBiasMeanVarLengths);
|
| 69 |
+
Tensor<BiasDataType> bnBias(scaleBiasMeanVarLengths);
|
| 70 |
+
|
| 71 |
+
// output data of the batchnorm forward algorithm
|
| 72 |
+
Tensor<YDataType> y_ref(inOutLengths);
|
| 73 |
+
Tensor<YDataType> y(inOutLengths);
|
| 74 |
+
|
| 75 |
+
Tensor<MeanVarDataType> resultSaveMean_ref(scaleBiasMeanVarLengths);
|
| 76 |
+
Tensor<MeanVarDataType> resultSaveInvVariance_ref(scaleBiasMeanVarLengths);
|
| 77 |
+
|
| 78 |
+
Tensor<MeanVarDataType> resultRunningMean_ref(scaleBiasMeanVarLengths);
|
| 79 |
+
Tensor<MeanVarDataType> resultRunningVariance_ref(scaleBiasMeanVarLengths);
|
| 80 |
+
|
| 81 |
+
auto inOutStrides = x.mDesc.GetStrides();
|
| 82 |
+
auto scaleBiasMeanVarStrides = bnScale.mDesc.GetStrides();
|
| 83 |
+
|
| 84 |
+
std::size_t num_thread = std::thread::hardware_concurrency();
|
| 85 |
+
|
| 86 |
+
if(updateMovingAverage)
|
| 87 |
+
{
|
| 88 |
+
const float x_mean = 0.0f;
|
| 89 |
+
const float x_stddev = 1.0f;
|
| 90 |
+
const float noise_stddev = 0.04f;
|
| 91 |
+
|
| 92 |
+
// input data in normal distribution
|
| 93 |
+
x.GenerateTensorValue(GeneratorTensor_4<XDataType>{x_mean, x_stddev}, num_thread);
|
| 94 |
+
|
| 95 |
+
// initialize the runningMean to be values with tiny variation to the mean of the x
|
| 96 |
+
// values
|
| 97 |
+
resultRunningMean_ref.GenerateTensorValue(
|
| 98 |
+
GeneratorTensor_4<MeanVarDataType>{x_mean, noise_stddev}, num_thread);
|
| 99 |
+
|
| 100 |
+
// initialize the runningVariance to be values with tiny variation to the variance of
|
| 101 |
+
// the x values
|
| 102 |
+
resultRunningVariance_ref.GenerateTensorValue(
|
| 103 |
+
GeneratorTensor_4<MeanVarDataType>{x_stddev * x_stddev, noise_stddev}, num_thread);
|
| 104 |
+
}
|
| 105 |
+
else
|
| 106 |
+
{
|
| 107 |
+
if constexpr(ck::is_same_v<XDataType, int8_t>)
|
| 108 |
+
x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5}, num_thread);
|
| 109 |
+
else
|
| 110 |
+
x.GenerateTensorValue(GeneratorTensor_3<XDataType>{-1.0f, 1.0f}, num_thread);
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
if(do_verification)
|
| 114 |
+
{
|
| 115 |
+
switch(init_method)
|
| 116 |
+
{
|
| 117 |
+
case 0:
|
| 118 |
+
bnScale.GenerateTensorValue(GeneratorTensor_0<ScaleDataType>{}, num_thread);
|
| 119 |
+
bnBias.GenerateTensorValue(GeneratorTensor_0<BiasDataType>{}, num_thread);
|
| 120 |
+
break;
|
| 121 |
+
case 1:
|
| 122 |
+
bnScale.GenerateTensorValue(GeneratorTensor_1<ScaleDataType>{1}, num_thread);
|
| 123 |
+
bnBias.GenerateTensorValue(GeneratorTensor_1<BiasDataType>{0}, num_thread);
|
| 124 |
+
break;
|
| 125 |
+
case 2:
|
| 126 |
+
bnScale.GenerateTensorValue(GeneratorTensor_2<ScaleDataType>{-5, 5}, num_thread);
|
| 127 |
+
bnBias.GenerateTensorValue(GeneratorTensor_2<BiasDataType>{-5, 5}, num_thread);
|
| 128 |
+
break;
|
| 129 |
+
default:
|
| 130 |
+
bnScale.GenerateTensorValue(GeneratorTensor_3<ScaleDataType>{-1.0f, 1.0f}, num_thread);
|
| 131 |
+
bnBias.GenerateTensorValue(GeneratorTensor_3<BiasDataType>{-1.0f, 1.0f}, num_thread);
|
| 132 |
+
}
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
// these buffers are usually provided by the user application
|
| 136 |
+
DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
|
| 137 |
+
DeviceMem y_dev(sizeof(XDataType) * y.mDesc.GetElementSpaceSize());
|
| 138 |
+
DeviceMem bnScale_dev(sizeof(ScaleDataType) * bnScale.mDesc.GetElementSpaceSize());
|
| 139 |
+
DeviceMem bnBias_dev(sizeof(BiasDataType) * bnBias.mDesc.GetElementSpaceSize());
|
| 140 |
+
|
| 141 |
+
// mean_dev or resultSaveMean_dev
|
| 142 |
+
DeviceMem resultSaveMean_dev(sizeof(MeanVarDataType) *
|
| 143 |
+
resultSaveMean_ref.mDesc.GetElementSpaceSize());
|
| 144 |
+
// meansquare_dev or resultSaveInvVariance_dev
|
| 145 |
+
DeviceMem resultSaveInvVariance_dev(sizeof(MeanVarDataType) *
|
| 146 |
+
resultSaveInvVariance_ref.mDesc.GetElementSpaceSize());
|
| 147 |
+
// resultRunningMean_dev
|
| 148 |
+
DeviceMem resultRunningMean_dev(sizeof(MeanVarDataType) *
|
| 149 |
+
resultRunningMean_ref.mDesc.GetElementSpaceSize());
|
| 150 |
+
// resultRunningVariance_dev
|
| 151 |
+
DeviceMem resultRunningVariance_dev(sizeof(MeanVarDataType) *
|
| 152 |
+
resultRunningVariance_ref.mDesc.GetElementSpaceSize());
|
| 153 |
+
|
| 154 |
+
x_dev.ToDevice(x.mData.data());
|
| 155 |
+
bnScale_dev.ToDevice(bnScale.mData.data());
|
| 156 |
+
bnBias_dev.ToDevice(bnBias.mData.data());
|
| 157 |
+
|
| 158 |
+
if(updateMovingAverage)
|
| 159 |
+
{
|
| 160 |
+
resultRunningMean_dev.ToDevice(resultRunningMean_ref.mData.data());
|
| 161 |
+
resultRunningVariance_dev.ToDevice(resultRunningVariance_ref.mData.data());
|
| 162 |
+
};
|
| 163 |
+
|
| 164 |
+
// used for storing the device result for verification when updateMovingAverage is enabled
|
| 165 |
+
Tensor<MeanVarDataType> resultRunningMean(scaleBiasMeanVarLengths);
|
| 166 |
+
Tensor<MeanVarDataType> resultRunningVariance(scaleBiasMeanVarLengths);
|
| 167 |
+
|
| 168 |
+
// used for storing the device result for verification when saveMeanAndInvVariance is enabled
|
| 169 |
+
Tensor<MeanVarDataType> resultSaveMean(scaleBiasMeanVarLengths);
|
| 170 |
+
Tensor<MeanVarDataType> resultSaveInvVariance(scaleBiasMeanVarLengths);
|
| 171 |
+
|
| 172 |
+
std::array<index_t, Rank> arrInOutLengths;
|
| 173 |
+
std::array<index_t, Rank> arrInOutStrides;
|
| 174 |
+
std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarLengths;
|
| 175 |
+
std::array<index_t, Rank - NumBatchNormReduceDim> arrScaleBiasMeanVarStrides;
|
| 176 |
+
std::array<int, NumBatchNormReduceDim> arrReduceDims;
|
| 177 |
+
|
| 178 |
+
std::copy(inOutLengths.begin(), inOutLengths.end(), arrInOutLengths.begin());
|
| 179 |
+
std::copy(inOutStrides.begin(), inOutStrides.end(), arrInOutStrides.begin());
|
| 180 |
+
std::copy(scaleBiasMeanVarLengths.begin(),
|
| 181 |
+
scaleBiasMeanVarLengths.end(),
|
| 182 |
+
arrScaleBiasMeanVarLengths.begin());
|
| 183 |
+
std::copy(scaleBiasMeanVarStrides.begin(),
|
| 184 |
+
scaleBiasMeanVarStrides.end(),
|
| 185 |
+
arrScaleBiasMeanVarStrides.begin());
|
| 186 |
+
|
| 187 |
+
std::copy(reduceDims.begin(), reduceDims.end(), arrReduceDims.begin());
|
| 188 |
+
|
| 189 |
+
using PassThroughOp = ck::tensor_operation::element_wise::PassThrough;
|
| 190 |
+
|
| 191 |
+
// add device batchnorm-forward instances
|
| 192 |
+
using DeviceOp = ck::tensor_operation::device::DeviceBatchNormFwd<XDataType,
|
| 193 |
+
YDataType,
|
| 194 |
+
AccDataType,
|
| 195 |
+
ScaleDataType,
|
| 196 |
+
BiasDataType,
|
| 197 |
+
MeanVarDataType,
|
| 198 |
+
PassThroughOp,
|
| 199 |
+
Rank,
|
| 200 |
+
NumBatchNormReduceDim>;
|
| 201 |
+
|
| 202 |
+
// get device op instances
|
| 203 |
+
const auto instance_ptrs =
|
| 204 |
+
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 205 |
+
DeviceOp>::GetInstances();
|
| 206 |
+
|
| 207 |
+
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
|
| 208 |
+
|
| 209 |
+
std::string best_instance_name;
|
| 210 |
+
float best_avg_time = std::numeric_limits<float>::max();
|
| 211 |
+
float best_gb_per_sec = 0;
|
| 212 |
+
|
| 213 |
+
if(do_verification)
|
| 214 |
+
{
|
| 215 |
+
using ReferenceBatchNormFwdInstance =
|
| 216 |
+
ck::tensor_operation::host::ReferenceBatchNormFwd<XDataType,
|
| 217 |
+
YDataType,
|
| 218 |
+
AccDataType,
|
| 219 |
+
ScaleDataType,
|
| 220 |
+
BiasDataType,
|
| 221 |
+
MeanVarDataType,
|
| 222 |
+
PassThroughOp,
|
| 223 |
+
Rank,
|
| 224 |
+
NumBatchNormReduceDim>;
|
| 225 |
+
|
| 226 |
+
auto batchNormFwd_ref = ReferenceBatchNormFwdInstance{};
|
| 227 |
+
|
| 228 |
+
auto argument_ptr_ref = batchNormFwd_ref.MakeArgumentPointer(
|
| 229 |
+
arrInOutLengths,
|
| 230 |
+
arrInOutStrides,
|
| 231 |
+
arrInOutStrides,
|
| 232 |
+
arrReduceDims,
|
| 233 |
+
arrScaleBiasMeanVarLengths,
|
| 234 |
+
arrScaleBiasMeanVarStrides,
|
| 235 |
+
arrScaleBiasMeanVarStrides,
|
| 236 |
+
arrScaleBiasMeanVarStrides,
|
| 237 |
+
x.mData.data(),
|
| 238 |
+
bnScale.mData.data(),
|
| 239 |
+
bnBias.mData.data(),
|
| 240 |
+
epsilon,
|
| 241 |
+
PassThroughOp{},
|
| 242 |
+
y_ref.mData.data(),
|
| 243 |
+
saveMeanAndInvVariance ? resultSaveMean_ref.mData.data() : nullptr,
|
| 244 |
+
saveMeanAndInvVariance ? resultSaveInvVariance_ref.mData.data() : nullptr,
|
| 245 |
+
averageFactor,
|
| 246 |
+
updateMovingAverage ? resultRunningMean_ref.mData.data() : nullptr,
|
| 247 |
+
updateMovingAverage ? resultRunningVariance_ref.mData.data() : nullptr);
|
| 248 |
+
|
| 249 |
+
if(!batchNormFwd_ref.IsSupportedArgument(argument_ptr_ref.get()))
|
| 250 |
+
{
|
| 251 |
+
std::cout << "The runtime parameters not supported by the reference instance, exiting!"
|
| 252 |
+
<< std::endl;
|
| 253 |
+
return (false);
|
| 254 |
+
};
|
| 255 |
+
|
| 256 |
+
auto invoker_ptr_ref = batchNormFwd_ref.MakeInvokerPointer();
|
| 257 |
+
|
| 258 |
+
(void)invoker_ptr_ref->Run(argument_ptr_ref.get());
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
int num_kernel = 0;
|
| 262 |
+
bool pass = true;
|
| 263 |
+
|
| 264 |
+
for(auto& inst_ptr : instance_ptrs)
|
| 265 |
+
{
|
| 266 |
+
auto argument_ptr = inst_ptr->MakeArgumentPointer(
|
| 267 |
+
arrInOutLengths,
|
| 268 |
+
arrInOutStrides,
|
| 269 |
+
arrInOutStrides,
|
| 270 |
+
arrReduceDims,
|
| 271 |
+
arrScaleBiasMeanVarLengths,
|
| 272 |
+
arrScaleBiasMeanVarStrides,
|
| 273 |
+
arrScaleBiasMeanVarStrides,
|
| 274 |
+
arrScaleBiasMeanVarStrides,
|
| 275 |
+
x_dev.GetDeviceBuffer(),
|
| 276 |
+
bnScale_dev.GetDeviceBuffer(),
|
| 277 |
+
bnBias_dev.GetDeviceBuffer(),
|
| 278 |
+
epsilon,
|
| 279 |
+
PassThroughOp{},
|
| 280 |
+
y_dev.GetDeviceBuffer(),
|
| 281 |
+
saveMeanAndInvVariance ? resultSaveMean_dev.GetDeviceBuffer() : nullptr,
|
| 282 |
+
saveMeanAndInvVariance ? resultSaveInvVariance_dev.GetDeviceBuffer() : nullptr,
|
| 283 |
+
averageFactor,
|
| 284 |
+
updateMovingAverage ? resultRunningMean_dev.GetDeviceBuffer() : nullptr,
|
| 285 |
+
updateMovingAverage ? resultRunningVariance_dev.GetDeviceBuffer() : nullptr);
|
| 286 |
+
|
| 287 |
+
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 288 |
+
{
|
| 289 |
+
num_kernel++;
|
| 290 |
+
}
|
| 291 |
+
else
|
| 292 |
+
{
|
| 293 |
+
if(time_kernel)
|
| 294 |
+
{
|
| 295 |
+
std::cout << inst_ptr->GetTypeString()
|
| 296 |
+
<< " skipped due to unsupported argument: " << std::endl;
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
continue;
|
| 300 |
+
};
|
| 301 |
+
|
| 302 |
+
size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
|
| 303 |
+
|
| 304 |
+
DeviceMem workspace_dev(workspace_sz);
|
| 305 |
+
|
| 306 |
+
inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());
|
| 307 |
+
|
| 308 |
+
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
|
| 309 |
+
|
| 310 |
+
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 311 |
+
|
| 312 |
+
size_t num_bytes = 0;
|
| 313 |
+
|
| 314 |
+
// inputing of x, scale, bias, outputing of y
|
| 315 |
+
num_bytes += total_length * (sizeof(XDataType) + sizeof(YDataType)) +
|
| 316 |
+
invariant_length * (sizeof(ScaleDataType) + sizeof(BiasDataType));
|
| 317 |
+
|
| 318 |
+
// outputing of mean, inv-variance
|
| 319 |
+
num_bytes += saveMeanAndInvVariance ? invariant_length * sizeof(MeanVarDataType) * 2 : 0;
|
| 320 |
+
|
| 321 |
+
// updating of moving mean, variance
|
| 322 |
+
num_bytes += updateMovingAverage ? invariant_length * sizeof(MeanVarDataType) * 4 : 0;
|
| 323 |
+
|
| 324 |
+
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
| 325 |
+
|
| 326 |
+
if(time_kernel)
|
| 327 |
+
std::cout << "Perf: " << avg_time << " ms, " << gb_per_sec << " GB/s, "
|
| 328 |
+
<< inst_ptr->GetTypeString() << std::endl;
|
| 329 |
+
|
| 330 |
+
if(avg_time < best_avg_time)
|
| 331 |
+
{
|
| 332 |
+
best_instance_name = inst_ptr->GetTypeString();
|
| 333 |
+
best_avg_time = avg_time;
|
| 334 |
+
best_gb_per_sec = gb_per_sec;
|
| 335 |
+
}
|
| 336 |
+
|
| 337 |
+
if(do_verification)
|
| 338 |
+
{
|
| 339 |
+
using ck::utils::check_err;
|
| 340 |
+
bool single_pass;
|
| 341 |
+
|
| 342 |
+
y_dev.FromDevice(y.mData.data());
|
| 343 |
+
|
| 344 |
+
if constexpr(ck::is_same_v<YDataType, ck::bhalf_t>)
|
| 345 |
+
single_pass = check_err(y.mData, y_ref.mData, "y results", 1e-2, 1e-2);
|
| 346 |
+
else
|
| 347 |
+
single_pass = check_err(y.mData, y_ref.mData, "y results", 4e-3, 4e-3);
|
| 348 |
+
|
| 349 |
+
if(updateMovingAverage)
|
| 350 |
+
{
|
| 351 |
+
resultRunningMean_dev.FromDevice(resultRunningMean.mData.data());
|
| 352 |
+
resultRunningVariance_dev.FromDevice(resultRunningVariance.mData.data());
|
| 353 |
+
|
| 354 |
+
// clang-format off
|
| 355 |
+
single_pass = single_pass && check_err(resultRunningMean.mData, resultRunningMean_ref.mData, "average mean results", 1.5e-5, 1.5e-5);
|
| 356 |
+
single_pass = single_pass && check_err(resultRunningVariance.mData, resultRunningVariance_ref.mData, "average variance results", 1e-5, 1e-5);
|
| 357 |
+
// clang-format on
|
| 358 |
+
};
|
| 359 |
+
|
| 360 |
+
if(saveMeanAndInvVariance)
|
| 361 |
+
{
|
| 362 |
+
resultSaveMean_dev.FromDevice(resultSaveMean.mData.data());
|
| 363 |
+
resultSaveInvVariance_dev.FromDevice(resultSaveInvVariance.mData.data());
|
| 364 |
+
|
| 365 |
+
// clang-format off
|
| 366 |
+
single_pass = single_pass && check_err(resultSaveMean.mData, resultSaveMean_ref.mData, "mean results", 3e-5, 3e-5);
|
| 367 |
+
single_pass = single_pass && check_err(resultSaveInvVariance.mData, resultSaveInvVariance_ref.mData, "inv-variance results", 7e-5, 7e-5);
|
| 368 |
+
// clang-format on
|
| 369 |
+
};
|
| 370 |
+
|
| 371 |
+
pass = pass && single_pass;
|
| 372 |
+
};
|
| 373 |
+
|
| 374 |
+
if(do_dumpout)
|
| 375 |
+
{
|
| 376 |
+
using ck::host_common::dumpBufferToFile;
|
| 377 |
+
|
| 378 |
+
// clang-format off
|
| 379 |
+
dumpBufferToFile("dump_x.bin", x.mData.data(), x.mDesc.GetElementSize());
|
| 380 |
+
dumpBufferToFile("dump_y.bin", y.mData.data(), y.mDesc.GetElementSize());
|
| 381 |
+
dumpBufferToFile("dump_y_ref.bin", y_ref.mData.data(), y_ref.mDesc.GetElementSize());
|
| 382 |
+
// clang-format off
|
| 383 |
+
|
| 384 |
+
if(saveMeanAndInvVariance)
|
| 385 |
+
{
|
| 386 |
+
// clang-format off
|
| 387 |
+
dumpBufferToFile("dump_mean.bin", resultSaveMean.mData.data(), resultSaveMean.mDesc.GetElementSize());
|
| 388 |
+
dumpBufferToFile("dump_mean_ref.bin", resultSaveMean_ref.mData.data(), resultSaveMean_ref.mDesc.GetElementSize());
|
| 389 |
+
dumpBufferToFile("dump_invvar.bin", resultSaveInvVariance.mData.data(), resultSaveInvVariance.mDesc.GetElementSize());
|
| 390 |
+
dumpBufferToFile("dump_invvar_ref.bin", resultSaveInvVariance_ref.mData.data(), resultSaveInvVariance_ref.mDesc.GetElementSize());
|
| 391 |
+
// clang-format on
|
| 392 |
+
};
|
| 393 |
+
};
|
| 394 |
+
}
|
| 395 |
+
|
| 396 |
+
if(time_kernel)
|
| 397 |
+
{
|
| 398 |
+
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
|
| 399 |
+
<< best_instance_name << std::endl;
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
if(num_kernel == 0)
|
| 403 |
+
{
|
| 404 |
+
std::cout << "Error: No kernel is applicable" << std::endl;
|
| 405 |
+
return false;
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
return pass;
|
| 409 |
+
}
|
| 410 |
+
|
| 411 |
+
} // namespace profiler
|
| 412 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_contraction_utils.hpp
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2023-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
|
| 10 |
+
using Row = ck::tensor_layout::gemm::RowMajor;
|
| 11 |
+
using Col = ck::tensor_layout::gemm::ColumnMajor;
|
| 12 |
+
|
| 13 |
+
using Bilinear = ck::tensor_operation::element_wise::Bilinear;
|
| 14 |
+
using Scale = ck::tensor_operation::element_wise::Scale;
|
| 15 |
+
|
| 16 |
+
enum struct ContractionMatrixLayout
|
| 17 |
+
{
|
| 18 |
+
MK_KN_MN_MN, // 0
|
| 19 |
+
MK_NK_MN_MN, // 1
|
| 20 |
+
KM_KN_MN_MN, // 2
|
| 21 |
+
KM_NK_MN_MN, // 3
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
enum struct ContractionDataType
|
| 25 |
+
{
|
| 26 |
+
F32_F32_F32_F32, // 0
|
| 27 |
+
F64_F64_F64_F64, // 1
|
| 28 |
+
F16_F16_F16_F16, // 2
|
| 29 |
+
BF16_BF16_BF16_BF16, // 3
|
| 30 |
+
};
|
| 31 |
+
|
| 32 |
+
enum struct ContractionComputeDataType
|
| 33 |
+
{
|
| 34 |
+
F32 = 0,
|
| 35 |
+
F64,
|
| 36 |
+
F16,
|
| 37 |
+
BF16,
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
inline void collect_index_params(char* argv[],
|
| 41 |
+
std::vector<ck::index_t>& params,
|
| 42 |
+
const ck::index_t from,
|
| 43 |
+
const ck::index_t num)
|
| 44 |
+
{
|
| 45 |
+
for(ck::index_t p = from; p < from + num; p++)
|
| 46 |
+
params.push_back(std::stoi(argv[p]));
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
// Defualt strides for row-major: {Dim1 * Dim2 * Dim3, Dim2 * Dim3, Dim3, 1}
|
| 50 |
+
// Defualt strides for column-major: {Dim1, 1, Dim0 * Dim1 * Dim3, Dim0 * Dim1}
|
| 51 |
+
|
| 52 |
+
// M1, 1, M0 * M1 * K1, M0 * M1
|
| 53 |
+
// K0, K1, M0, M1
|
| 54 |
+
inline void
|
| 55 |
+
assign_default_strides(Row, std::vector<ck::index_t>& strides, std::vector<ck::index_t> dims)
|
| 56 |
+
{
|
| 57 |
+
ck::index_t stride = 1;
|
| 58 |
+
for(ck::index_t s = strides.size() - 1; s >= 0; s--)
|
| 59 |
+
{
|
| 60 |
+
strides[s] = stride;
|
| 61 |
+
stride *= dims[s];
|
| 62 |
+
}
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
inline void
|
| 66 |
+
assign_default_strides(Col, std::vector<ck::index_t>& strides, std::vector<ck::index_t> dims)
|
| 67 |
+
{
|
| 68 |
+
// Assign second half of strides
|
| 69 |
+
ck::index_t stride = 1;
|
| 70 |
+
for(ck::index_t s = strides.size() / 2 - 1; s >= 0; s--)
|
| 71 |
+
{
|
| 72 |
+
strides[s] = stride;
|
| 73 |
+
stride *= dims[s];
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
// Assign first half of strides
|
| 77 |
+
for(ck::index_t s = strides.size() - 1; s > static_cast<ck::index_t>(strides.size()) / 2 - 1;
|
| 78 |
+
s--)
|
| 79 |
+
{
|
| 80 |
+
strides[s] = stride;
|
| 81 |
+
stride *= dims[s];
|
| 82 |
+
}
|
| 83 |
+
}
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_conv_bwd_data_impl.hpp
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include "ck/ck.hpp"
|
| 7 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 8 |
+
#include "ck/tensor_operation/gpu/device/device_conv_bwd_data.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 10 |
+
|
| 11 |
+
#include "ck/library/tensor_operation_instance/gpu/convolution_backward_data.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/utility/check_err.hpp"
|
| 14 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 15 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 16 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 17 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 18 |
+
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
| 19 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_conv_bwd_data.hpp"
|
| 20 |
+
|
| 21 |
+
namespace ck {
|
| 22 |
+
namespace profiler {
|
| 23 |
+
|
| 24 |
+
template <typename DataType>
|
| 25 |
+
void show_data_nhwc_layout(Tensor<DataType>& nhwc)
|
| 26 |
+
{
|
| 27 |
+
std::cout << "[";
|
| 28 |
+
for(int n = 0; n < ck::type_convert<int>(nhwc.mDesc.GetLengths()[0]); n++)
|
| 29 |
+
{
|
| 30 |
+
std::cout << "[";
|
| 31 |
+
for(int hi = 0; hi < ck::type_convert<int>(nhwc.mDesc.GetLengths()[2]); hi++)
|
| 32 |
+
{
|
| 33 |
+
std::cout << "[";
|
| 34 |
+
for(int wi = 0; wi < ck::type_convert<int>(nhwc.mDesc.GetLengths()[3]); wi++)
|
| 35 |
+
{
|
| 36 |
+
std::cout << "[";
|
| 37 |
+
for(int c = 0; c < ck::type_convert<int>(nhwc.mDesc.GetLengths()[1]); c++)
|
| 38 |
+
{
|
| 39 |
+
std::cout << static_cast<float>(nhwc(n, c, hi, wi)) << " ";
|
| 40 |
+
}
|
| 41 |
+
std::cout << "]";
|
| 42 |
+
}
|
| 43 |
+
std::cout << "]";
|
| 44 |
+
}
|
| 45 |
+
std::cout << "]";
|
| 46 |
+
}
|
| 47 |
+
std::cout << "]";
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
template <ck::index_t NDimSpatial,
|
| 51 |
+
typename InLayout,
|
| 52 |
+
typename WeiLayout,
|
| 53 |
+
typename OutLayout,
|
| 54 |
+
typename InDataType,
|
| 55 |
+
typename WeiDataType,
|
| 56 |
+
typename OutDataType>
|
| 57 |
+
bool profile_conv_bwd_data_impl(int do_verification,
|
| 58 |
+
int init_method,
|
| 59 |
+
bool do_log,
|
| 60 |
+
bool time_kernel,
|
| 61 |
+
const ck::utils::conv::ConvParam& conv_param)
|
| 62 |
+
{
|
| 63 |
+
using InElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 64 |
+
using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 65 |
+
using OutElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 66 |
+
|
| 67 |
+
const auto in_element_op = InElementOp{};
|
| 68 |
+
const auto wei_element_op = WeiElementOp{};
|
| 69 |
+
const auto out_element_op = OutElementOp{};
|
| 70 |
+
|
| 71 |
+
const auto in_g_n_c_wis_desc =
|
| 72 |
+
ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_param);
|
| 73 |
+
|
| 74 |
+
const auto wei_g_k_c_xs_desc =
|
| 75 |
+
ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(conv_param);
|
| 76 |
+
|
| 77 |
+
const auto out_g_n_k_wos_desc =
|
| 78 |
+
ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(conv_param);
|
| 79 |
+
|
| 80 |
+
Tensor<InDataType> input_host_result(in_g_n_c_wis_desc);
|
| 81 |
+
Tensor<InDataType> input_device_result(in_g_n_c_wis_desc);
|
| 82 |
+
Tensor<WeiDataType> weight(wei_g_k_c_xs_desc);
|
| 83 |
+
Tensor<OutDataType> output(out_g_n_k_wos_desc);
|
| 84 |
+
|
| 85 |
+
std::vector<ck::index_t> input_spatial_lengths_i32(NDimSpatial);
|
| 86 |
+
std::vector<ck::index_t> filter_spatial_lengths_i32(NDimSpatial);
|
| 87 |
+
std::vector<ck::index_t> output_spatial_lengths_i32(NDimSpatial);
|
| 88 |
+
std::vector<ck::index_t> conv_filter_strides_i32(NDimSpatial);
|
| 89 |
+
std::vector<ck::index_t> conv_filter_dilations_i32(NDimSpatial);
|
| 90 |
+
std::vector<ck::index_t> input_left_pads_i32(NDimSpatial);
|
| 91 |
+
std::vector<ck::index_t> input_right_pads_i32(NDimSpatial);
|
| 92 |
+
|
| 93 |
+
for(ck::index_t d = 0; d < NDimSpatial; d++)
|
| 94 |
+
{
|
| 95 |
+
input_spatial_lengths_i32[d] =
|
| 96 |
+
static_cast<ck::index_t>(conv_param.input_spatial_lengths_[d]);
|
| 97 |
+
filter_spatial_lengths_i32[d] =
|
| 98 |
+
static_cast<ck::index_t>(conv_param.filter_spatial_lengths_[d]);
|
| 99 |
+
output_spatial_lengths_i32[d] =
|
| 100 |
+
static_cast<ck::index_t>(conv_param.GetOutputSpatialLengths()[d]);
|
| 101 |
+
conv_filter_strides_i32[d] = static_cast<ck::index_t>(conv_param.conv_filter_strides_[d]);
|
| 102 |
+
conv_filter_dilations_i32[d] =
|
| 103 |
+
static_cast<ck::index_t>(conv_param.conv_filter_dilations_[d]);
|
| 104 |
+
input_left_pads_i32[d] = static_cast<ck::index_t>(conv_param.input_left_pads_[d]);
|
| 105 |
+
input_right_pads_i32[d] = static_cast<ck::index_t>(conv_param.input_right_pads_[d]);
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
std::cout << "input: " << input_host_result.mDesc << std::endl;
|
| 109 |
+
std::cout << "weight: " << weight.mDesc << std::endl;
|
| 110 |
+
std::cout << "output: " << output.mDesc << std::endl;
|
| 111 |
+
|
| 112 |
+
switch(init_method)
|
| 113 |
+
{
|
| 114 |
+
case 0: break;
|
| 115 |
+
case 1:
|
| 116 |
+
output.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
|
| 117 |
+
weight.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
|
| 118 |
+
break;
|
| 119 |
+
default:
|
| 120 |
+
output.GenerateTensorValue(GeneratorTensor_3<OutDataType>{0.0, 1.0});
|
| 121 |
+
weight.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
DeviceMem in_device_buf(sizeof(InDataType) * input_device_result.mDesc.GetElementSpaceSize());
|
| 125 |
+
DeviceMem wei_device_buf(sizeof(WeiDataType) * weight.mDesc.GetElementSpaceSize());
|
| 126 |
+
DeviceMem out_device_buf(sizeof(OutDataType) * output.mDesc.GetElementSpaceSize());
|
| 127 |
+
|
| 128 |
+
out_device_buf.ToDevice(output.mData.data());
|
| 129 |
+
wei_device_buf.ToDevice(weight.mData.data());
|
| 130 |
+
|
| 131 |
+
if(do_verification)
|
| 132 |
+
{
|
| 133 |
+
auto ref_conv = ck::tensor_operation::host::ReferenceConvBwdData<NDimSpatial,
|
| 134 |
+
InDataType,
|
| 135 |
+
WeiDataType,
|
| 136 |
+
OutDataType,
|
| 137 |
+
InElementOp,
|
| 138 |
+
WeiElementOp,
|
| 139 |
+
OutElementOp>{};
|
| 140 |
+
|
| 141 |
+
auto ref_invoker = ref_conv.MakeInvoker();
|
| 142 |
+
|
| 143 |
+
auto ref_argument = ref_conv.MakeArgument(input_host_result,
|
| 144 |
+
weight,
|
| 145 |
+
output,
|
| 146 |
+
conv_param.conv_filter_strides_,
|
| 147 |
+
conv_param.conv_filter_dilations_,
|
| 148 |
+
conv_param.input_left_pads_,
|
| 149 |
+
conv_param.input_right_pads_,
|
| 150 |
+
InElementOp{},
|
| 151 |
+
WeiElementOp{},
|
| 152 |
+
OutElementOp{});
|
| 153 |
+
ref_invoker.Run(ref_argument);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
using DeviceOp = ck::tensor_operation::device::DeviceConvBwdData<NDimSpatial,
|
| 157 |
+
InLayout,
|
| 158 |
+
WeiLayout,
|
| 159 |
+
OutLayout,
|
| 160 |
+
InDataType,
|
| 161 |
+
WeiDataType,
|
| 162 |
+
OutDataType,
|
| 163 |
+
InElementOp,
|
| 164 |
+
WeiElementOp,
|
| 165 |
+
OutElementOp>;
|
| 166 |
+
|
| 167 |
+
// get device op instances
|
| 168 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 169 |
+
DeviceOp>::GetInstances();
|
| 170 |
+
|
| 171 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 172 |
+
|
| 173 |
+
std::string best_op_name;
|
| 174 |
+
float best_avg_time = 0;
|
| 175 |
+
float best_tflops = 0;
|
| 176 |
+
float best_gb_per_sec = 0;
|
| 177 |
+
|
| 178 |
+
// profile device Conv instances
|
| 179 |
+
bool pass = true;
|
| 180 |
+
|
| 181 |
+
for(auto& op_ptr : op_ptrs)
|
| 182 |
+
{
|
| 183 |
+
auto argument_ptr =
|
| 184 |
+
op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
|
| 185 |
+
static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
|
| 186 |
+
static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
|
| 187 |
+
static_cast<ck::index_t>(conv_param.N_),
|
| 188 |
+
static_cast<ck::index_t>(conv_param.K_),
|
| 189 |
+
static_cast<ck::index_t>(conv_param.C_),
|
| 190 |
+
input_spatial_lengths_i32,
|
| 191 |
+
filter_spatial_lengths_i32,
|
| 192 |
+
output_spatial_lengths_i32,
|
| 193 |
+
conv_filter_strides_i32,
|
| 194 |
+
conv_filter_dilations_i32,
|
| 195 |
+
input_left_pads_i32,
|
| 196 |
+
input_right_pads_i32,
|
| 197 |
+
in_element_op,
|
| 198 |
+
wei_element_op,
|
| 199 |
+
out_element_op);
|
| 200 |
+
|
| 201 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 202 |
+
{
|
| 203 |
+
// for conv bwd data, some input tensor element are zero, but not written by kernel,
|
| 204 |
+
// need to set zero
|
| 205 |
+
in_device_buf.SetZero();
|
| 206 |
+
|
| 207 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 208 |
+
|
| 209 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 210 |
+
|
| 211 |
+
float avg_time =
|
| 212 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 213 |
+
|
| 214 |
+
std::size_t flop = conv_param.GetFlops();
|
| 215 |
+
std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();
|
| 216 |
+
|
| 217 |
+
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
| 218 |
+
float gb_per_sec = num_btype / 1.E6 / avg_time;
|
| 219 |
+
|
| 220 |
+
std::cout << "Perf: " << avg_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 221 |
+
<< " GB/s" << std::endl;
|
| 222 |
+
|
| 223 |
+
if(tflops > best_tflops)
|
| 224 |
+
{
|
| 225 |
+
best_op_name = op_name;
|
| 226 |
+
best_tflops = tflops;
|
| 227 |
+
best_avg_time = avg_time;
|
| 228 |
+
best_gb_per_sec = gb_per_sec;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
if(do_verification)
|
| 232 |
+
{
|
| 233 |
+
in_device_buf.FromDevice(input_device_result.mData.data());
|
| 234 |
+
|
| 235 |
+
pass = pass & ck::utils::check_err(input_device_result, input_host_result);
|
| 236 |
+
|
| 237 |
+
if(do_log)
|
| 238 |
+
{
|
| 239 |
+
std::cout << "in : ";
|
| 240 |
+
show_data_nhwc_layout(output);
|
| 241 |
+
std::cout << std::endl;
|
| 242 |
+
|
| 243 |
+
std::cout << "wei: ";
|
| 244 |
+
show_data_nhwc_layout(weight);
|
| 245 |
+
std::cout << std::endl;
|
| 246 |
+
|
| 247 |
+
std::cout << "out_host : ";
|
| 248 |
+
show_data_nhwc_layout(input_host_result);
|
| 249 |
+
std::cout << std::endl;
|
| 250 |
+
|
| 251 |
+
std::cout << "out_device: ";
|
| 252 |
+
show_data_nhwc_layout(input_device_result);
|
| 253 |
+
std::cout << std::endl;
|
| 254 |
+
}
|
| 255 |
+
}
|
| 256 |
+
}
|
| 257 |
+
else
|
| 258 |
+
{
|
| 259 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 260 |
+
}
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
std::cout << "Best configuration parameters:"
|
| 264 |
+
<< "\nname: " << best_op_name << "\navg_time: " << best_avg_time
|
| 265 |
+
<< "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;
|
| 266 |
+
|
| 267 |
+
return pass;
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
} // namespace profiler
|
| 271 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_conv_fwd_impl.hpp
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/device_conv_fwd.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/convolution_forward.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 22 |
+
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
| 23 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
| 24 |
+
|
| 25 |
+
namespace ck {
|
| 26 |
+
namespace profiler {
|
| 27 |
+
|
| 28 |
+
/// @brief Profile every registered DeviceConvFwd instance for one forward-convolution
///        problem, optionally verifying each device result against a CPU reference.
///
/// @tparam NDimSpatial number of spatial dimensions of the convolution
/// @tparam InLayout    input tensor layout tag
/// @tparam WeiLayout   weight tensor layout tag
/// @tparam OutLayout   output tensor layout tag
/// @tparam InDataType  input element type
/// @tparam WeiDataType weight element type
/// @tparam OutDataType output element type
///
/// @param do_verification non-zero: run the host reference and compare each device output
/// @param init_method     0: leave tensors uninitialized; 1: small random integers in [-5,5];
///                        any other value: random floats
/// @param do_log          dump tensor contents (as float) after verification
/// @param time_kernel     time the kernels (passed through to StreamConfig)
/// @param conv_param      convolution problem description (N, K, C, spatial sizes,
///                        strides, dilations, pads)
/// @return true when all supported instances passed verification
///         (trivially true when verification is disabled)
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType>
bool profile_conv_fwd_impl(int do_verification,
                           int init_method,
                           bool do_log,
                           bool time_kernel,
                           const ck::utils::conv::ConvParam& conv_param)
{
    // This profiler only exercises plain convolution: all element-wise ops are identity.
    using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
    using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
    using OutElementOp = ck::tensor_operation::element_wise::PassThrough;

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};
    const auto out_element_op = OutElementOp{};

    // Build packed host tensor descriptors in the canonical G/N/C/spatial orderings
    // for the requested layouts.
    const auto in_g_n_c_wis_desc =
        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_param);

    const auto wei_g_k_c_xs_desc =
        ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(conv_param);

    const auto out_g_n_k_wos_desc =
        ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(conv_param);

    Tensor<InDataType> input(in_g_n_c_wis_desc);
    Tensor<WeiDataType> weight(wei_g_k_c_xs_desc);
    // Separate host-side and device-side output buffers so the reference result is
    // never overwritten by a device readback.
    Tensor<OutDataType> host_output(out_g_n_k_wos_desc);
    Tensor<OutDataType> device_output(out_g_n_k_wos_desc);

    // Device ops take ck::index_t (i32) arguments while ConvParam stores wider
    // integers, so copy/narrow every per-dimension parameter up front.
    std::vector<ck::index_t> input_spatial_lengths_i32(NDimSpatial);
    std::vector<ck::index_t> filter_spatial_lengths_i32(NDimSpatial);
    std::vector<ck::index_t> output_spatial_lengths_i32(NDimSpatial);
    std::vector<ck::index_t> conv_filter_strides_i32(NDimSpatial);
    std::vector<ck::index_t> conv_filter_dilations_i32(NDimSpatial);
    std::vector<ck::index_t> input_left_pads_i32(NDimSpatial);
    std::vector<ck::index_t> input_right_pads_i32(NDimSpatial);

    for(ck::index_t d = 0; d < NDimSpatial; d++)
    {
        input_spatial_lengths_i32[d] =
            static_cast<ck::index_t>(conv_param.input_spatial_lengths_[d]);
        filter_spatial_lengths_i32[d] =
            static_cast<ck::index_t>(conv_param.filter_spatial_lengths_[d]);
        output_spatial_lengths_i32[d] =
            static_cast<ck::index_t>(conv_param.GetOutputSpatialLengths()[d]);
        conv_filter_strides_i32[d] = static_cast<ck::index_t>(conv_param.conv_filter_strides_[d]);
        conv_filter_dilations_i32[d] =
            static_cast<ck::index_t>(conv_param.conv_filter_dilations_[d]);
        input_left_pads_i32[d]  = static_cast<ck::index_t>(conv_param.input_left_pads_[d]);
        input_right_pads_i32[d] = static_cast<ck::index_t>(conv_param.input_right_pads_[d]);
    }

    std::cout << "input: " << input.mDesc << std::endl;
    std::cout << "weight: " << weight.mDesc << std::endl;
    std::cout << "output: " << host_output.mDesc << std::endl;

    // Initialize input and weight on the host according to init_method
    // (case 0 deliberately leaves the data uninitialized for fastest startup).
    switch(init_method)
    {
    case 0: break;
    case 1:
        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        weight.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        break;
    default:
        input.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        weight.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weight.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * device_output.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(input.mData.data());
    wei_device_buf.ToDevice(weight.mData.data());

    // run reference op
    if(do_verification)
    {
        auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
                                                                     InDataType,
                                                                     WeiDataType,
                                                                     OutDataType,
                                                                     InElementOp,
                                                                     WeiElementOp,
                                                                     OutElementOp>{};

        auto ref_invoker  = ref_conv.MakeInvoker();
        auto ref_argument = ref_conv.MakeArgument(input,
                                                  weight,
                                                  host_output,
                                                  conv_param.conv_filter_strides_,
                                                  conv_param.conv_filter_dilations_,
                                                  conv_param.input_left_pads_,
                                                  conv_param.input_right_pads_,
                                                  in_element_op,
                                                  wei_element_op,
                                                  out_element_op);

        // init host output to zero
        host_output.SetZero();

        ref_invoker.Run(ref_argument);
    }

    // Interface type used to look up all matching device instances in the factory.
    using DeviceOp = ck::tensor_operation::device::DeviceConvFwd<NDimSpatial,
                                                                 InLayout,
                                                                 WeiLayout,
                                                                 OutLayout,
                                                                 InDataType,
                                                                 WeiDataType,
                                                                 OutDataType,
                                                                 InElementOp,
                                                                 WeiElementOp,
                                                                 OutElementOp>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "found " << op_ptrs.size() << " instances" << std::endl;

    // Best-performing instance seen so far (selected by TFlops).
    std::string best_op_name;
    float best_avg_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device op instances
    bool pass = true;

    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr =
            op_ptr->MakeArgumentPointer(static_cast<InDataType*>(in_device_buf.GetDeviceBuffer()),
                                        static_cast<WeiDataType*>(wei_device_buf.GetDeviceBuffer()),
                                        static_cast<OutDataType*>(out_device_buf.GetDeviceBuffer()),
                                        static_cast<ck::index_t>(conv_param.N_),
                                        static_cast<ck::index_t>(conv_param.K_),
                                        static_cast<ck::index_t>(conv_param.C_),
                                        input_spatial_lengths_i32,
                                        filter_spatial_lengths_i32,
                                        output_spatial_lengths_i32,
                                        conv_filter_strides_i32,
                                        conv_filter_dilations_i32,
                                        input_left_pads_i32,
                                        input_right_pads_i32,
                                        in_element_op,
                                        wei_element_op,
                                        out_element_op);

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // re-init output to zero before profiling next kernel
            out_device_buf.SetZero();

            std::string op_name = op_ptr->GetTypeString();

            auto invoker_ptr = op_ptr->MakeInvokerPointer();

            float avg_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop      = conv_param.GetFlops();
            std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

            // avg_time is in ms: flop / 1e9 / ms == flop / 1e12 / s == TFlops.
            float tflops = static_cast<float>(flop) / 1.E9 / avg_time;

            // bytes / 1e6 / ms == GB/s.
            float gb_per_sec = num_btype / 1.E6 / avg_time;

            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
                      << gb_per_sec << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                out_device_buf.FromDevice(device_output.mData.data());

                // Accumulate verification results across all instances; a single
                // failing instance makes the whole run fail.
                pass = pass & ck::utils::check_err(device_output, host_output);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "input : ", input.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "weight: ", weight.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "host_output : ", host_output.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "device_output: ", device_output.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    }

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_op_name << "\navg_time: " << best_avg_time
              << "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;

    return pass;
}
|
| 242 |
+
|
| 243 |
+
} // namespace profiler
|
| 244 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_elementwise_layernorm_impl.hpp
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
|
| 10 |
+
#include "ck/library/tensor_operation_instance/gpu/elementwise_normalization.hpp"
|
| 11 |
+
|
| 12 |
+
#include "ck/library/utility/check_err.hpp"
|
| 13 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 14 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 15 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 16 |
+
#include "ck/library/utility/literals.hpp"
|
| 17 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm.hpp"
|
| 18 |
+
|
| 19 |
+
namespace ck {
|
| 20 |
+
namespace profiler {
|
| 21 |
+
|
| 22 |
+
/// Apply a binary element-wise functor over two 2-D host tensors, writing the
/// result into C: for every (row, col) in shape, functor(C(row,col), A(row,col), B(row,col)).
/// The functor receives the output element by reference as its first argument.
template <typename HostTensorA, typename HostTensorB, typename HostTensorC, typename Functor>
void host_elementwise2D(HostTensorC& C,
                        const HostTensorA& A,
                        const HostTensorB& B,
                        const std::vector<std::size_t>& shape,
                        Functor functor)
{
    // Element type stored in C (strip the reference returned by its accessor).
    using OutElem = std::remove_reference_t<decltype(C(0, 0))>;

    const std::size_t num_rows = shape[0];
    const std::size_t num_cols = shape[1];

    for(std::size_t row = 0; row < num_rows; ++row)
    {
        for(std::size_t col = 0; col < num_cols; ++col)
        {
            const auto lhs  = A(row, col);
            const auto rhs  = B(row, col);
            OutElem result  = 0;
            functor(result, lhs, rhs);
            C(row, col) = result;
        }
    }
}
|
| 41 |
+
|
| 42 |
+
template <typename ADataType,
|
| 43 |
+
typename BDataType,
|
| 44 |
+
typename GammaDataType,
|
| 45 |
+
typename BetaDataType,
|
| 46 |
+
typename AccDataType,
|
| 47 |
+
typename YDataType>
|
| 48 |
+
bool profile_elementwise_layernorm_impl(int do_verification,
|
| 49 |
+
int init_method,
|
| 50 |
+
bool do_log,
|
| 51 |
+
bool time_kernel,
|
| 52 |
+
std::vector<index_t> length)
|
| 53 |
+
{
|
| 54 |
+
using Add = ck::tensor_operation::element_wise::Add;
|
| 55 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 56 |
+
|
| 57 |
+
if(length.size() != 2)
|
| 58 |
+
return false;
|
| 59 |
+
|
| 60 |
+
index_t M = length[0];
|
| 61 |
+
index_t N = length[1];
|
| 62 |
+
index_t Stride = N;
|
| 63 |
+
|
| 64 |
+
constexpr int Rank = 2;
|
| 65 |
+
constexpr int NumReduceDim = 1;
|
| 66 |
+
|
| 67 |
+
std::vector<index_t> reduce_dim = {1};
|
| 68 |
+
std::vector<index_t> gammaBetaLength = {N};
|
| 69 |
+
std::vector<index_t> gammaBetaStride = {0, 1};
|
| 70 |
+
|
| 71 |
+
auto f_host_tensor_descriptor2d = [](std::size_t row, std::size_t col, std::size_t stride) {
|
| 72 |
+
using namespace ck::literals;
|
| 73 |
+
|
| 74 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 75 |
+
};
|
| 76 |
+
|
| 77 |
+
Tensor<ADataType> a(length);
|
| 78 |
+
Tensor<BDataType> b(length);
|
| 79 |
+
Tensor<GammaDataType> gamma(gammaBetaLength);
|
| 80 |
+
Tensor<BetaDataType> beta(gammaBetaLength);
|
| 81 |
+
Tensor<YDataType> y(length);
|
| 82 |
+
Tensor<YDataType> host_y(length);
|
| 83 |
+
Tensor<AccDataType> host_save_mean({M});
|
| 84 |
+
Tensor<AccDataType> host_save_inv_std({M});
|
| 85 |
+
|
| 86 |
+
switch(init_method)
|
| 87 |
+
{
|
| 88 |
+
case 0:
|
| 89 |
+
a.GenerateTensorValue(GeneratorTensor_1<ADataType>{});
|
| 90 |
+
b.GenerateTensorValue(GeneratorTensor_1<BDataType>{});
|
| 91 |
+
gamma.GenerateTensorValue(GeneratorTensor_1<GammaDataType>{});
|
| 92 |
+
beta.GenerateTensorValue(GeneratorTensor_1<BetaDataType>{});
|
| 93 |
+
break;
|
| 94 |
+
case 1:
|
| 95 |
+
a.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 96 |
+
b.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
|
| 97 |
+
gamma.GenerateTensorValue(GeneratorTensor_2<GammaDataType>{-5, 5});
|
| 98 |
+
beta.GenerateTensorValue(GeneratorTensor_2<BetaDataType>{-5, 5});
|
| 99 |
+
break;
|
| 100 |
+
default:
|
| 101 |
+
a.GenerateTensorValue(GeneratorTensor_3<ADataType>{0, 1});
|
| 102 |
+
b.GenerateTensorValue(GeneratorTensor_3<BDataType>{0, 1});
|
| 103 |
+
gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-0.5, 0.5});
|
| 104 |
+
beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-0.5, 0.5});
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
DeviceMem a_dev(sizeof(ADataType) * a.mDesc.GetElementSpaceSize());
|
| 108 |
+
DeviceMem b_dev(sizeof(ADataType) * b.mDesc.GetElementSpaceSize());
|
| 109 |
+
DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
|
| 110 |
+
DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
|
| 111 |
+
DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
|
| 112 |
+
|
| 113 |
+
a_dev.ToDevice(a.mData.data());
|
| 114 |
+
b_dev.ToDevice(b.mData.data());
|
| 115 |
+
gamma_dev.ToDevice(gamma.mData.data());
|
| 116 |
+
beta_dev.ToDevice(beta.mData.data());
|
| 117 |
+
|
| 118 |
+
std::array<const void*, 2> input = {a_dev.GetDeviceBuffer(), b_dev.GetDeviceBuffer()};
|
| 119 |
+
|
| 120 |
+
// add device normalization instances
|
| 121 |
+
using DeviceOp = ck::tensor_operation::device::DeviceElementwiseNormalization<
|
| 122 |
+
ck::Tuple<ADataType, BDataType>,
|
| 123 |
+
GammaDataType,
|
| 124 |
+
BetaDataType,
|
| 125 |
+
AccDataType,
|
| 126 |
+
YDataType,
|
| 127 |
+
Add,
|
| 128 |
+
PassThrough,
|
| 129 |
+
2,
|
| 130 |
+
1>;
|
| 131 |
+
|
| 132 |
+
// get device op instances
|
| 133 |
+
const auto instance_ptrs =
|
| 134 |
+
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 135 |
+
DeviceOp>::GetInstances();
|
| 136 |
+
|
| 137 |
+
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
|
| 138 |
+
|
| 139 |
+
std::string best_instance_name;
|
| 140 |
+
float best_avg_time = std::numeric_limits<float>::max();
|
| 141 |
+
float best_gb_per_sec = 0;
|
| 142 |
+
|
| 143 |
+
if(do_verification)
|
| 144 |
+
{
|
| 145 |
+
using XDataType = ADataType;
|
| 146 |
+
std::vector<std::size_t> mn = {static_cast<unsigned long>(M),
|
| 147 |
+
static_cast<unsigned long>(N)};
|
| 148 |
+
Tensor<XDataType> x(f_host_tensor_descriptor2d(M, N, Stride));
|
| 149 |
+
host_elementwise2D<Tensor<ADataType>, Tensor<BDataType>, Tensor<XDataType>, Add>(
|
| 150 |
+
x, a, b, mn, Add{});
|
| 151 |
+
|
| 152 |
+
using ReferenceInstance = ck::tensor_operation::host::ReferenceLayernorm<XDataType,
|
| 153 |
+
GammaDataType,
|
| 154 |
+
BetaDataType,
|
| 155 |
+
YDataType,
|
| 156 |
+
AccDataType,
|
| 157 |
+
AccDataType,
|
| 158 |
+
PassThrough,
|
| 159 |
+
Rank,
|
| 160 |
+
NumReduceDim>;
|
| 161 |
+
|
| 162 |
+
ReferenceInstance ref;
|
| 163 |
+
auto ref_argument = ref.MakeArgument(x,
|
| 164 |
+
gamma,
|
| 165 |
+
beta,
|
| 166 |
+
host_y,
|
| 167 |
+
host_save_mean,
|
| 168 |
+
host_save_inv_std,
|
| 169 |
+
PassThrough{},
|
| 170 |
+
{M, N},
|
| 171 |
+
{1},
|
| 172 |
+
1e-4);
|
| 173 |
+
auto ref_invoker = ref.MakeInvoker();
|
| 174 |
+
ref_invoker.Run(ref_argument);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
int num_kernel = 0;
|
| 178 |
+
|
| 179 |
+
for(auto& inst_ptr : instance_ptrs)
|
| 180 |
+
{
|
| 181 |
+
auto argument_ptr = inst_ptr->MakeArgumentPointer(
|
| 182 |
+
length,
|
| 183 |
+
{
|
| 184 |
+
std::vector<ck::index_t>{a.mDesc.GetStrides().begin(), a.mDesc.GetStrides().end()},
|
| 185 |
+
std::vector<ck::index_t>{b.mDesc.GetStrides().begin(), b.mDesc.GetStrides().end()},
|
| 186 |
+
},
|
| 187 |
+
gammaBetaStride,
|
| 188 |
+
gammaBetaStride,
|
| 189 |
+
std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
|
| 190 |
+
reduce_dim,
|
| 191 |
+
1e-4,
|
| 192 |
+
input,
|
| 193 |
+
gamma_dev.GetDeviceBuffer(),
|
| 194 |
+
beta_dev.GetDeviceBuffer(),
|
| 195 |
+
y_dev.GetDeviceBuffer(),
|
| 196 |
+
Add{},
|
| 197 |
+
PassThrough{});
|
| 198 |
+
|
| 199 |
+
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 200 |
+
{
|
| 201 |
+
++num_kernel;
|
| 202 |
+
}
|
| 203 |
+
else
|
| 204 |
+
{
|
| 205 |
+
continue;
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
|
| 209 |
+
|
| 210 |
+
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 211 |
+
|
| 212 |
+
std::size_t num_bytes = a.mDesc.GetElementSize() * sizeof(ADataType) +
|
| 213 |
+
b.mDesc.GetElementSize() * sizeof(BDataType) +
|
| 214 |
+
gamma.mDesc.GetElementSize() * sizeof(GammaDataType) +
|
| 215 |
+
beta.mDesc.GetElementSize() * sizeof(BetaDataType) +
|
| 216 |
+
y.mDesc.GetElementSize() * sizeof(YDataType);
|
| 217 |
+
|
| 218 |
+
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
| 219 |
+
|
| 220 |
+
if(time_kernel)
|
| 221 |
+
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
|
| 222 |
+
<< inst_ptr->GetTypeString() << std::endl;
|
| 223 |
+
|
| 224 |
+
if(avg_time < best_avg_time)
|
| 225 |
+
{
|
| 226 |
+
best_instance_name = inst_ptr->GetTypeString();
|
| 227 |
+
best_avg_time = avg_time;
|
| 228 |
+
best_gb_per_sec = gb_per_sec;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
if(do_verification)
|
| 232 |
+
{
|
| 233 |
+
y_dev.FromDevice(y.mData.data());
|
| 234 |
+
|
| 235 |
+
bool pass =
|
| 236 |
+
ck::utils::check_err(y.mData, host_y.mData, "Error: Incorrect results", 5e-3, 5e-3);
|
| 237 |
+
|
| 238 |
+
if(do_log)
|
| 239 |
+
{
|
| 240 |
+
LogRangeAsType<float>(std::cout << "a : ", a.mData, ",") << std::endl;
|
| 241 |
+
LogRangeAsType<float>(std::cout << "b : ", b.mData, ",") << std::endl;
|
| 242 |
+
LogRangeAsType<float>(std::cout << "host_y : ", host_y.mData, ",") << std::endl;
|
| 243 |
+
LogRangeAsType<float>(std::cout << "y : ", y.mData, ",") << std::endl;
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
if(!pass)
|
| 247 |
+
{
|
| 248 |
+
std::cout << inst_ptr->GetTypeString() << " failed verification: ";
|
| 249 |
+
LogRange(std::cout << "lengths = [", length, ", ") << "]." << std::endl;
|
| 250 |
+
return false;
|
| 251 |
+
}
|
| 252 |
+
else
|
| 253 |
+
{
|
| 254 |
+
if(time_kernel)
|
| 255 |
+
std::cout << "pass" << std::endl;
|
| 256 |
+
}
|
| 257 |
+
}
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
if(time_kernel)
|
| 261 |
+
{
|
| 262 |
+
LogRange(std::cout << "length = ", length, ",") << ", ";
|
| 263 |
+
std::cout << "num_kernel = " << num_kernel << ", best perf = " << best_avg_time << " ms, "
|
| 264 |
+
<< best_gb_per_sec << " GB/s, " << best_instance_name << std::endl;
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
if(num_kernel == 0)
|
| 268 |
+
{
|
| 269 |
+
std::cout << "Error: No kernel is tested" << std::endl;
|
| 270 |
+
return false;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
return true;
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
} // namespace profiler
|
| 277 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_ab_scale_impl.hpp
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle_v3_ab_scale.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_ab_scale.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename A0DataType,
|
| 28 |
+
typename A1DataType,
|
| 29 |
+
typename B0DataType,
|
| 30 |
+
typename B1DataType,
|
| 31 |
+
typename ComputeDataType,
|
| 32 |
+
typename AccDataType,
|
| 33 |
+
typename EDataType,
|
| 34 |
+
index_t ScaleBlockM,
|
| 35 |
+
index_t ScaleBlockN,
|
| 36 |
+
index_t ScaleBlockK,
|
| 37 |
+
typename ALayout,
|
| 38 |
+
typename BLayout,
|
| 39 |
+
typename ELayout>
|
| 40 |
+
bool profile_gemm_ab_scale_impl(int do_verification,
|
| 41 |
+
int init_method,
|
| 42 |
+
bool do_log,
|
| 43 |
+
bool time_kernel,
|
| 44 |
+
int M,
|
| 45 |
+
int N,
|
| 46 |
+
int K,
|
| 47 |
+
int StrideA,
|
| 48 |
+
int StrideB,
|
| 49 |
+
int StrideE,
|
| 50 |
+
int n_warmup,
|
| 51 |
+
int n_iter,
|
| 52 |
+
uint64_t rotating = 0)
|
| 53 |
+
{
|
| 54 |
+
bool pass = true;
|
| 55 |
+
|
| 56 |
+
auto f_host_tensor_descriptor =
|
| 57 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 58 |
+
using namespace ck::literals;
|
| 59 |
+
|
| 60 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 61 |
+
{
|
| 62 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 63 |
+
}
|
| 64 |
+
else
|
| 65 |
+
{
|
| 66 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 67 |
+
}
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
ck::index_t Scale_Stride_AM = ck::is_same_v<ALayout, tensor_layout::gemm::RowMajor>
|
| 71 |
+
? ((K + ScaleBlockK - 1) / ScaleBlockK)
|
| 72 |
+
: ((M + ScaleBlockM - 1) / ScaleBlockM);
|
| 73 |
+
ck::index_t Scale_Stride_BN = ck::is_same_v<BLayout, ck::tensor_layout::gemm::ColumnMajor>
|
| 74 |
+
? ((K + ScaleBlockK - 1) / ScaleBlockK)
|
| 75 |
+
: ((N + ScaleBlockN - 1) / ScaleBlockN);
|
| 76 |
+
|
| 77 |
+
Tensor<A0DataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 78 |
+
Tensor<A1DataType> a1_m_k(f_host_tensor_descriptor((M + ScaleBlockM - 1) / ScaleBlockM,
|
| 79 |
+
(K + ScaleBlockK - 1) / ScaleBlockK,
|
| 80 |
+
Scale_Stride_AM,
|
| 81 |
+
ALayout{}));
|
| 82 |
+
Tensor<B0DataType> b0_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 83 |
+
Tensor<B1DataType> b1_k_n(f_host_tensor_descriptor((K + ScaleBlockK - 1) / ScaleBlockK,
|
| 84 |
+
(N + ScaleBlockN - 1) / ScaleBlockN,
|
| 85 |
+
Scale_Stride_BN,
|
| 86 |
+
BLayout{}));
|
| 87 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 88 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 89 |
+
|
| 90 |
+
int total_gemm_needed =
|
| 91 |
+
a0_m_k.GetElementSpaceSizeInBytes() + b0_k_n.GetElementSpaceSizeInBytes() +
|
| 92 |
+
a1_m_k.GetElementSpaceSizeInBytes() + b1_k_n.GetElementSpaceSizeInBytes();
|
| 93 |
+
int rotating_count = std::max(
|
| 94 |
+
1,
|
| 95 |
+
std::min(n_iter,
|
| 96 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 97 |
+
|
| 98 |
+
std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
|
| 99 |
+
std::cout << "a1_m_k: " << a1_m_k.mDesc << std::endl;
|
| 100 |
+
std::cout << "b0_k_n: " << b0_k_n.mDesc << std::endl;
|
| 101 |
+
std::cout << "b1_k_n: " << b1_k_n.mDesc << std::endl;
|
| 102 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 103 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 104 |
+
|
| 105 |
+
switch(init_method)
|
| 106 |
+
{
|
| 107 |
+
case 0: break;
|
| 108 |
+
case 1:
|
| 109 |
+
a0_m_k.GenerateTensorValue(GeneratorTensor_2<A0DataType>{-2, 2});
|
| 110 |
+
b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 2});
|
| 111 |
+
a1_m_k.GenerateTensorValue(GeneratorTensor_3<A1DataType>{0, 1.0});
|
| 112 |
+
b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 1.0});
|
| 113 |
+
break;
|
| 114 |
+
default:
|
| 115 |
+
a0_m_k.GenerateTensorValue(GeneratorTensor_3<A0DataType>{-0.5, 0.5});
|
| 116 |
+
b0_k_n.GenerateTensorValue(GeneratorTensor_3<B0DataType>{-0.5, 0.5});
|
| 117 |
+
a1_m_k.GenerateTensorValue(GeneratorTensor_3<A1DataType>{0, 1.0});
|
| 118 |
+
b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 1.0});
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 122 |
+
|
| 123 |
+
using AElementOp = PassThrough;
|
| 124 |
+
using BElementOp = PassThrough;
|
| 125 |
+
using CElementOp = PassThrough;
|
| 126 |
+
|
| 127 |
+
const auto a_element_op = AElementOp{};
|
| 128 |
+
const auto b_element_op = BElementOp{};
|
| 129 |
+
const auto c_element_op = CElementOp{};
|
| 130 |
+
|
| 131 |
+
DeviceMem a0_device_buf(sizeof(A0DataType) * a0_m_k.mDesc.GetElementSpaceSize());
|
| 132 |
+
DeviceMem b0_device_buf(sizeof(B0DataType) * b0_k_n.mDesc.GetElementSpaceSize());
|
| 133 |
+
DeviceMem a1_device_buf(sizeof(A1DataType) * a1_m_k.mDesc.GetElementSpaceSize());
|
| 134 |
+
DeviceMem b1_device_buf(sizeof(B1DataType) * b1_k_n.mDesc.GetElementSpaceSize());
|
| 135 |
+
DeviceMem c_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 136 |
+
|
| 137 |
+
a0_device_buf.ToDevice(a0_m_k.mData.data());
|
| 138 |
+
b0_device_buf.ToDevice(b0_k_n.mData.data());
|
| 139 |
+
a1_device_buf.ToDevice(a1_m_k.mData.data());
|
| 140 |
+
b1_device_buf.ToDevice(b1_k_n.mData.data());
|
| 141 |
+
|
| 142 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmMultipleD_ABScale<ALayout,
|
| 143 |
+
BLayout,
|
| 144 |
+
ck::Tuple<>,
|
| 145 |
+
ELayout,
|
| 146 |
+
A0DataType,
|
| 147 |
+
A1DataType,
|
| 148 |
+
B0DataType,
|
| 149 |
+
B1DataType,
|
| 150 |
+
ck::Tuple<>,
|
| 151 |
+
EDataType,
|
| 152 |
+
ScaleBlockM,
|
| 153 |
+
ScaleBlockN,
|
| 154 |
+
ScaleBlockK,
|
| 155 |
+
AElementOp,
|
| 156 |
+
BElementOp,
|
| 157 |
+
CElementOp>;
|
| 158 |
+
|
| 159 |
+
// get device op instances
|
| 160 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 161 |
+
DeviceOp>::GetInstances();
|
| 162 |
+
|
| 163 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 164 |
+
|
| 165 |
+
// Run reference GEMM
|
| 166 |
+
if(do_verification)
|
| 167 |
+
{
|
| 168 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 169 |
+
Tensor<float> a_m_k({M, K});
|
| 170 |
+
Tensor<float> b_k_n({K, N});
|
| 171 |
+
|
| 172 |
+
for(int m = 0; m < M; m++)
|
| 173 |
+
{
|
| 174 |
+
for(int k = 0; k < K; k++)
|
| 175 |
+
{
|
| 176 |
+
a_m_k(m, k) = ck::type_convert<float>(a0_m_k(m, k)) *
|
| 177 |
+
a1_m_k(m / ScaleBlockM, k / ScaleBlockK);
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
for(int n = 0; n < N; n++)
|
| 182 |
+
{
|
| 183 |
+
for(int k = 0; k < K; k++)
|
| 184 |
+
{
|
| 185 |
+
b_k_n(k, n) = ck::type_convert<float>(b0_k_n(k, n)) *
|
| 186 |
+
b1_k_n(k / ScaleBlockK, n / ScaleBlockN);
|
| 187 |
+
}
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<float,
|
| 191 |
+
float,
|
| 192 |
+
AccDataType,
|
| 193 |
+
AccDataType,
|
| 194 |
+
AElementOp,
|
| 195 |
+
BElementOp,
|
| 196 |
+
PassThrough,
|
| 197 |
+
float>;
|
| 198 |
+
|
| 199 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 200 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 201 |
+
|
| 202 |
+
auto ref_argument =
|
| 203 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, PassThrough{}, PassThrough{}, PassThrough{});
|
| 204 |
+
|
| 205 |
+
ref_invoker.Run(ref_argument);
|
| 206 |
+
|
| 207 |
+
for(int m = 0; m < M; ++m)
|
| 208 |
+
{
|
| 209 |
+
for(int n = 0; n < N; ++n)
|
| 210 |
+
{
|
| 211 |
+
e_m_n_host_result(m, n) = ck::type_convert<EDataType>(c_m_n(m, n));
|
| 212 |
+
}
|
| 213 |
+
}
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
std::string best_op_name;
|
| 217 |
+
float best_ave_time = 0;
|
| 218 |
+
float best_tflops = 0;
|
| 219 |
+
float best_gb_per_sec = 0;
|
| 220 |
+
|
| 221 |
+
// profile device GEMM instances
|
| 222 |
+
for(auto& op_ptr : op_ptrs)
|
| 223 |
+
{
|
| 224 |
+
auto argument_ptr =
|
| 225 |
+
op_ptr->MakeArgumentPointer(static_cast<A0DataType*>(a0_device_buf.GetDeviceBuffer()),
|
| 226 |
+
static_cast<B0DataType*>(b0_device_buf.GetDeviceBuffer()),
|
| 227 |
+
std::array<const void*, 0>{},
|
| 228 |
+
static_cast<EDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 229 |
+
M,
|
| 230 |
+
N,
|
| 231 |
+
K,
|
| 232 |
+
StrideA,
|
| 233 |
+
StrideB,
|
| 234 |
+
std::array<ck::index_t, 0>{},
|
| 235 |
+
StrideE,
|
| 236 |
+
a1_device_buf.GetDeviceBuffer(),
|
| 237 |
+
b1_device_buf.GetDeviceBuffer(),
|
| 238 |
+
a_element_op,
|
| 239 |
+
b_element_op,
|
| 240 |
+
c_element_op);
|
| 241 |
+
|
| 242 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 243 |
+
|
| 244 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 245 |
+
{
|
| 246 |
+
|
| 247 |
+
// re-init C to zero before profiling next kernel
|
| 248 |
+
c_device_buf.SetZero();
|
| 249 |
+
|
| 250 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 251 |
+
|
| 252 |
+
if(do_verification)
|
| 253 |
+
{
|
| 254 |
+
c_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 255 |
+
|
| 256 |
+
#if defined CK_ENABLE_FP8
|
| 257 |
+
// set softer tolerances for fp8
|
| 258 |
+
if constexpr(is_same_v<A0DataType, f8_t> || is_same_v<B0DataType, f8_t> ||
|
| 259 |
+
is_same_v<EDataType, f8_t>)
|
| 260 |
+
{
|
| 261 |
+
std::string msg = "Error: Incorrect results!";
|
| 262 |
+
double rtol = 5e-2;
|
| 263 |
+
double atol = 5e-2;
|
| 264 |
+
pass = pass & ck::utils::check_err(
|
| 265 |
+
e_m_n_device_result, e_m_n_host_result, msg, rtol, atol);
|
| 266 |
+
}
|
| 267 |
+
else
|
| 268 |
+
{
|
| 269 |
+
#endif
|
| 270 |
+
pass = pass & ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 271 |
+
#if defined CK_ENABLE_FP8
|
| 272 |
+
}
|
| 273 |
+
#endif
|
| 274 |
+
|
| 275 |
+
if(do_log)
|
| 276 |
+
{
|
| 277 |
+
LogRangeAsType<float>(std::cout << "a : ", a0_m_k.mData, ",") << std::endl;
|
| 278 |
+
LogRangeAsType<float>(std::cout << "b: ", b0_k_n.mData, ",") << std::endl;
|
| 279 |
+
LogRangeAsType<float>(std::cout << "c_host : ", e_m_n_host_result.mData, ",")
|
| 280 |
+
<< std::endl;
|
| 281 |
+
LogRangeAsType<float>(std::cout << "c_device: ", e_m_n_device_result.mData, ",")
|
| 282 |
+
<< std::endl;
|
| 283 |
+
}
|
| 284 |
+
}
|
| 285 |
+
|
| 286 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 287 |
+
|
| 288 |
+
float ave_time = invoker_ptr->Run(
|
| 289 |
+
argument_ptr.get(),
|
| 290 |
+
StreamConfig{
|
| 291 |
+
nullptr, time_kernel, 0, n_warmup, n_iter, rotating_count > 1, rotating_count});
|
| 292 |
+
|
| 293 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 294 |
+
|
| 295 |
+
std::size_t num_btype =
|
| 296 |
+
sizeof(A0DataType) * M * K + sizeof(B0DataType) * K * N + sizeof(EDataType) * M * N;
|
| 297 |
+
|
| 298 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 299 |
+
|
| 300 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 301 |
+
|
| 302 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 303 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 304 |
+
|
| 305 |
+
if(tflops > best_tflops)
|
| 306 |
+
{
|
| 307 |
+
best_op_name = op_name;
|
| 308 |
+
best_tflops = tflops;
|
| 309 |
+
best_ave_time = ave_time;
|
| 310 |
+
best_gb_per_sec = gb_per_sec;
|
| 311 |
+
}
|
| 312 |
+
}
|
| 313 |
+
else
|
| 314 |
+
{
|
| 315 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 316 |
+
}
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
if constexpr(is_same<EDataType, float>::value)
|
| 320 |
+
{
|
| 321 |
+
std::cout << "Best Perf for datatype = f32";
|
| 322 |
+
}
|
| 323 |
+
else if constexpr(is_same<EDataType, half_t>::value)
|
| 324 |
+
{
|
| 325 |
+
std::cout << "Best Perf for datatype = f16";
|
| 326 |
+
}
|
| 327 |
+
else if constexpr(is_same<EDataType, bhalf_t>::value)
|
| 328 |
+
{
|
| 329 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 330 |
+
}
|
| 331 |
+
else if constexpr(is_same<EDataType, int8_t>::value)
|
| 332 |
+
{
|
| 333 |
+
std::cout << "Best Perf for datatype = int8";
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 337 |
+
{
|
| 338 |
+
std::cout << " ALayout = RowMajor";
|
| 339 |
+
}
|
| 340 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 341 |
+
{
|
| 342 |
+
std::cout << " ALayout = ColumnMajor";
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 346 |
+
{
|
| 347 |
+
std::cout << " BLayout = RowMajor";
|
| 348 |
+
}
|
| 349 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 350 |
+
{
|
| 351 |
+
std::cout << " BLayout = ColumnMajor";
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 355 |
+
<< " StrideB = " << StrideB << " StrideE = " << StrideE << " : " << best_ave_time
|
| 356 |
+
<< " ms, " << best_tflops << " TFlops, " << best_gb_per_sec << " GB/s, "
|
| 357 |
+
<< best_op_name << std::endl;
|
| 358 |
+
|
| 359 |
+
return pass;
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
} // namespace profiler
|
| 363 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_fastgelu_impl.hpp
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_add_fastgelu.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 21 |
+
|
| 22 |
+
namespace ck {
|
| 23 |
+
namespace profiler {
|
| 24 |
+
|
| 25 |
+
template <typename ADataType,
|
| 26 |
+
typename BDataType,
|
| 27 |
+
typename AccDataType,
|
| 28 |
+
typename D0DataType,
|
| 29 |
+
typename EDataType,
|
| 30 |
+
typename ALayout,
|
| 31 |
+
typename BLayout,
|
| 32 |
+
typename D0Layout,
|
| 33 |
+
typename ELayout>
|
| 34 |
+
bool profile_gemm_add_fastgelu_impl(int do_verification,
|
| 35 |
+
int init_method,
|
| 36 |
+
bool /*do_log*/,
|
| 37 |
+
bool time_kernel,
|
| 38 |
+
int M,
|
| 39 |
+
int N,
|
| 40 |
+
int K,
|
| 41 |
+
int StrideA,
|
| 42 |
+
int StrideB,
|
| 43 |
+
int StrideD0,
|
| 44 |
+
int StrideE)
|
| 45 |
+
{
|
| 46 |
+
auto f_host_tensor_descriptor =
|
| 47 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 48 |
+
using namespace ck::literals;
|
| 49 |
+
|
| 50 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 51 |
+
{
|
| 52 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 53 |
+
}
|
| 54 |
+
else
|
| 55 |
+
{
|
| 56 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 57 |
+
}
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 61 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 62 |
+
Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor(M, N, StrideD0, D0Layout{}));
|
| 63 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 64 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 65 |
+
|
| 66 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 67 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 68 |
+
std::cout << "d0_m_n: " << d0_m_n.mDesc << std::endl;
|
| 69 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 70 |
+
|
| 71 |
+
switch(init_method)
|
| 72 |
+
{
|
| 73 |
+
case 0: break;
|
| 74 |
+
case 1:
|
| 75 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 76 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
|
| 77 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
|
| 78 |
+
break;
|
| 79 |
+
default:
|
| 80 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 81 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 82 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 86 |
+
using AddFastGelu = ck::tensor_operation::element_wise::AddFastGelu;
|
| 87 |
+
|
| 88 |
+
using AElementOp = PassThrough;
|
| 89 |
+
using BElementOp = PassThrough;
|
| 90 |
+
using CDEElementOp = AddFastGelu;
|
| 91 |
+
|
| 92 |
+
const auto a_element_op = AElementOp{};
|
| 93 |
+
const auto b_element_op = BElementOp{};
|
| 94 |
+
const auto cde_element_op = CDEElementOp{};
|
| 95 |
+
|
| 96 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmMultipleD<
|
| 97 |
+
ALayout,
|
| 98 |
+
BLayout,
|
| 99 |
+
ck::Tuple<D0Layout>,
|
| 100 |
+
ELayout,
|
| 101 |
+
ADataType,
|
| 102 |
+
BDataType,
|
| 103 |
+
ck::Tuple<D0DataType>,
|
| 104 |
+
EDataType,
|
| 105 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 106 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 107 |
+
ck::tensor_operation::element_wise::AddFastGelu>;
|
| 108 |
+
|
| 109 |
+
// get device op instances
|
| 110 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 111 |
+
DeviceOp>::GetInstances();
|
| 112 |
+
|
| 113 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 114 |
+
|
| 115 |
+
// run reference
|
| 116 |
+
if(do_verification)
|
| 117 |
+
{
|
| 118 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 119 |
+
|
| 120 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 121 |
+
BDataType,
|
| 122 |
+
AccDataType,
|
| 123 |
+
AccDataType,
|
| 124 |
+
AElementOp,
|
| 125 |
+
BElementOp,
|
| 126 |
+
PassThrough>;
|
| 127 |
+
|
| 128 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 129 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 130 |
+
|
| 131 |
+
auto ref_argument =
|
| 132 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});
|
| 133 |
+
|
| 134 |
+
ref_invoker.Run(ref_argument);
|
| 135 |
+
|
| 136 |
+
for(int m = 0; m < M; ++m)
|
| 137 |
+
{
|
| 138 |
+
for(int n = 0; n < N; ++n)
|
| 139 |
+
{
|
| 140 |
+
cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d0_m_n(m, n));
|
| 141 |
+
}
|
| 142 |
+
}
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 146 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 147 |
+
DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 148 |
+
DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 149 |
+
|
| 150 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 151 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 152 |
+
d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
|
| 153 |
+
|
| 154 |
+
std::string best_op_name;
|
| 155 |
+
float best_ave_time = 0;
|
| 156 |
+
float best_tflops = 0;
|
| 157 |
+
float best_gb_per_sec = 0;
|
| 158 |
+
|
| 159 |
+
bool pass = true;
|
| 160 |
+
|
| 161 |
+
// profile device operation instances
|
| 162 |
+
for(auto& op_ptr : op_ptrs)
|
| 163 |
+
{
|
| 164 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 165 |
+
a_device_buf.GetDeviceBuffer(),
|
| 166 |
+
b_device_buf.GetDeviceBuffer(),
|
| 167 |
+
std::array<const void*, 1>{d0_m_n_device_buf.GetDeviceBuffer()},
|
| 168 |
+
e_device_buf.GetDeviceBuffer(),
|
| 169 |
+
M,
|
| 170 |
+
N,
|
| 171 |
+
K,
|
| 172 |
+
StrideA,
|
| 173 |
+
StrideB,
|
| 174 |
+
std::array<ck::index_t, 1>{StrideD0},
|
| 175 |
+
StrideE,
|
| 176 |
+
a_element_op,
|
| 177 |
+
b_element_op,
|
| 178 |
+
cde_element_op);
|
| 179 |
+
|
| 180 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 181 |
+
|
| 182 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 183 |
+
|
| 184 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 185 |
+
{
|
| 186 |
+
// re-init E to zero before profiling a kernel
|
| 187 |
+
e_device_buf.SetZero();
|
| 188 |
+
|
| 189 |
+
float ave_time =
|
| 190 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 191 |
+
|
| 192 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 193 |
+
|
| 194 |
+
std::size_t num_btype =
|
| 195 |
+
sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;
|
| 196 |
+
|
| 197 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 198 |
+
|
| 199 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 200 |
+
|
| 201 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 202 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 203 |
+
|
| 204 |
+
if(tflops > best_tflops)
|
| 205 |
+
{
|
| 206 |
+
best_op_name = op_name;
|
| 207 |
+
best_tflops = tflops;
|
| 208 |
+
best_ave_time = ave_time;
|
| 209 |
+
best_gb_per_sec = gb_per_sec;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
if(do_verification)
|
| 213 |
+
{
|
| 214 |
+
e_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 215 |
+
|
| 216 |
+
pass = pass && ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
else
|
| 220 |
+
{
|
| 221 |
+
std::cout << op_name << " does not support this problem" << std::endl;
|
| 222 |
+
}
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 226 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 227 |
+
|
| 228 |
+
return pass;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
} // namespace profiler
|
| 232 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_impl.hpp
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_add.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 21 |
+
|
| 22 |
+
namespace ck {
|
| 23 |
+
namespace profiler {
|
| 24 |
+
|
| 25 |
+
template <typename ADataType,
|
| 26 |
+
typename BDataType,
|
| 27 |
+
typename AccDataType,
|
| 28 |
+
typename D0DataType,
|
| 29 |
+
typename EDataType,
|
| 30 |
+
typename ALayout,
|
| 31 |
+
typename BLayout,
|
| 32 |
+
typename D0Layout,
|
| 33 |
+
typename ELayout>
|
| 34 |
+
bool profile_gemm_add_impl(int do_verification,
|
| 35 |
+
int init_method,
|
| 36 |
+
bool /*do_log*/,
|
| 37 |
+
bool time_kernel,
|
| 38 |
+
int M,
|
| 39 |
+
int N,
|
| 40 |
+
int K,
|
| 41 |
+
int StrideA,
|
| 42 |
+
int StrideB,
|
| 43 |
+
int StrideD0,
|
| 44 |
+
int StrideE)
|
| 45 |
+
{
|
| 46 |
+
auto f_host_tensor_descriptor =
|
| 47 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 48 |
+
using namespace ck::literals;
|
| 49 |
+
|
| 50 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 51 |
+
{
|
| 52 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 53 |
+
}
|
| 54 |
+
else
|
| 55 |
+
{
|
| 56 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 57 |
+
}
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 61 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 62 |
+
Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor(M, N, StrideD0, D0Layout{}));
|
| 63 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 64 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 65 |
+
|
| 66 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 67 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 68 |
+
std::cout << "d0_m_n: " << d0_m_n.mDesc << std::endl;
|
| 69 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 70 |
+
|
| 71 |
+
switch(init_method)
|
| 72 |
+
{
|
| 73 |
+
case 0: break;
|
| 74 |
+
case 1:
|
| 75 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 76 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
|
| 77 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
|
| 78 |
+
break;
|
| 79 |
+
default:
|
| 80 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 81 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 82 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 86 |
+
using Add = ck::tensor_operation::element_wise::Add;
|
| 87 |
+
|
| 88 |
+
using AElementOp = PassThrough;
|
| 89 |
+
using BElementOp = PassThrough;
|
| 90 |
+
using CDEElementOp = Add;
|
| 91 |
+
|
| 92 |
+
const auto a_element_op = AElementOp{};
|
| 93 |
+
const auto b_element_op = BElementOp{};
|
| 94 |
+
const auto cde_element_op = CDEElementOp{};
|
| 95 |
+
|
| 96 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmMultipleD<
|
| 97 |
+
ALayout,
|
| 98 |
+
BLayout,
|
| 99 |
+
ck::Tuple<D0Layout>,
|
| 100 |
+
ELayout,
|
| 101 |
+
ADataType,
|
| 102 |
+
BDataType,
|
| 103 |
+
ck::Tuple<D0DataType>,
|
| 104 |
+
EDataType,
|
| 105 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 106 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 107 |
+
ck::tensor_operation::element_wise::Add>;
|
| 108 |
+
|
| 109 |
+
// get device op instances
|
| 110 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 111 |
+
DeviceOp>::GetInstances();
|
| 112 |
+
|
| 113 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 114 |
+
|
| 115 |
+
// run reference
|
| 116 |
+
if(do_verification)
|
| 117 |
+
{
|
| 118 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 119 |
+
|
| 120 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 121 |
+
BDataType,
|
| 122 |
+
AccDataType,
|
| 123 |
+
AccDataType,
|
| 124 |
+
AElementOp,
|
| 125 |
+
BElementOp,
|
| 126 |
+
PassThrough>;
|
| 127 |
+
|
| 128 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 129 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 130 |
+
|
| 131 |
+
auto ref_argument =
|
| 132 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});
|
| 133 |
+
|
| 134 |
+
ref_invoker.Run(ref_argument);
|
| 135 |
+
|
| 136 |
+
for(int m = 0; m < M; ++m)
|
| 137 |
+
{
|
| 138 |
+
for(int n = 0; n < N; ++n)
|
| 139 |
+
{
|
| 140 |
+
cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d0_m_n(m, n));
|
| 141 |
+
}
|
| 142 |
+
}
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 146 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 147 |
+
DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 148 |
+
DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 149 |
+
|
| 150 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 151 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 152 |
+
d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
|
| 153 |
+
|
| 154 |
+
std::string best_op_name;
|
| 155 |
+
float best_ave_time = 0;
|
| 156 |
+
float best_tflops = 0;
|
| 157 |
+
float best_gb_per_sec = 0;
|
| 158 |
+
|
| 159 |
+
bool pass = true;
|
| 160 |
+
|
| 161 |
+
// profile device operation instances
|
| 162 |
+
for(auto& op_ptr : op_ptrs)
|
| 163 |
+
{
|
| 164 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 165 |
+
a_device_buf.GetDeviceBuffer(),
|
| 166 |
+
b_device_buf.GetDeviceBuffer(),
|
| 167 |
+
std::array<const void*, 1>{d0_m_n_device_buf.GetDeviceBuffer()},
|
| 168 |
+
e_device_buf.GetDeviceBuffer(),
|
| 169 |
+
M,
|
| 170 |
+
N,
|
| 171 |
+
K,
|
| 172 |
+
StrideA,
|
| 173 |
+
StrideB,
|
| 174 |
+
std::array<ck::index_t, 1>{StrideD0},
|
| 175 |
+
StrideE,
|
| 176 |
+
a_element_op,
|
| 177 |
+
b_element_op,
|
| 178 |
+
cde_element_op);
|
| 179 |
+
|
| 180 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 181 |
+
|
| 182 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 183 |
+
|
| 184 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 185 |
+
{
|
| 186 |
+
// re-init E to zero before profiling a kernel
|
| 187 |
+
e_device_buf.SetZero();
|
| 188 |
+
|
| 189 |
+
float ave_time =
|
| 190 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 191 |
+
|
| 192 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 193 |
+
|
| 194 |
+
std::size_t num_btype =
|
| 195 |
+
sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;
|
| 196 |
+
|
| 197 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 198 |
+
|
| 199 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 200 |
+
|
| 201 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 202 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 203 |
+
|
| 204 |
+
if(tflops > best_tflops)
|
| 205 |
+
{
|
| 206 |
+
best_op_name = op_name;
|
| 207 |
+
best_tflops = tflops;
|
| 208 |
+
best_ave_time = ave_time;
|
| 209 |
+
best_gb_per_sec = gb_per_sec;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
if(do_verification)
|
| 213 |
+
{
|
| 214 |
+
e_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 215 |
+
|
| 216 |
+
pass = pass && ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
else
|
| 220 |
+
{
|
| 221 |
+
std::cout << op_name << " does not support this problem" << std::endl;
|
| 222 |
+
}
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 226 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 227 |
+
|
| 228 |
+
return pass;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
} // namespace profiler
|
| 232 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_relu_add_layernorm_impl.hpp
ADDED
|
@@ -0,0 +1,349 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d_layernorm.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_add_relu_add_layernorm.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 21 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_layernorm.hpp"
|
| 22 |
+
|
| 23 |
+
namespace ck {
|
| 24 |
+
namespace profiler {
|
| 25 |
+
|
| 26 |
+
template <typename ADataType,
|
| 27 |
+
typename BDataType,
|
| 28 |
+
typename AccDataType,
|
| 29 |
+
typename D0DataType,
|
| 30 |
+
typename D1DataType,
|
| 31 |
+
typename EMeanVarDataType,
|
| 32 |
+
typename GammaDataType,
|
| 33 |
+
typename BetaDataType,
|
| 34 |
+
typename HDataType,
|
| 35 |
+
typename AElementOp,
|
| 36 |
+
typename BElementOp,
|
| 37 |
+
typename CDEElementOp,
|
| 38 |
+
typename HElementOp>
|
| 39 |
+
void host_gemm_layernorm(Tensor<HDataType>& h_m_n,
|
| 40 |
+
const Tensor<ADataType>& a_m_k,
|
| 41 |
+
const Tensor<BDataType>& b_k_n,
|
| 42 |
+
const Tensor<D0DataType>& d0_m_n,
|
| 43 |
+
const Tensor<D1DataType>& d1_m_n,
|
| 44 |
+
const Tensor<GammaDataType>& gamma_n,
|
| 45 |
+
const Tensor<BetaDataType>& beta_n,
|
| 46 |
+
AElementOp a_element_op,
|
| 47 |
+
BElementOp b_element_op,
|
| 48 |
+
CDEElementOp cde_element_op,
|
| 49 |
+
HElementOp h_element_op,
|
| 50 |
+
int M,
|
| 51 |
+
int N,
|
| 52 |
+
AccDataType epsilon = 1e-5)
|
| 53 |
+
{
|
| 54 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 55 |
+
|
| 56 |
+
using ReferenceGemm = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 57 |
+
BDataType,
|
| 58 |
+
AccDataType,
|
| 59 |
+
AccDataType,
|
| 60 |
+
AElementOp,
|
| 61 |
+
BElementOp,
|
| 62 |
+
PassThrough>;
|
| 63 |
+
|
| 64 |
+
using ReferenceLayernorm = ck::tensor_operation::host::ReferenceLayernorm<EMeanVarDataType,
|
| 65 |
+
GammaDataType,
|
| 66 |
+
BetaDataType,
|
| 67 |
+
HDataType,
|
| 68 |
+
AccDataType,
|
| 69 |
+
AccDataType,
|
| 70 |
+
HElementOp,
|
| 71 |
+
2,
|
| 72 |
+
1>;
|
| 73 |
+
|
| 74 |
+
Tensor<EMeanVarDataType> e_m_n(HostTensorDescriptor{M, N});
|
| 75 |
+
Tensor<AccDataType> c_m_n(HostTensorDescriptor{M, N});
|
| 76 |
+
Tensor<AccDataType> save_mean({M});
|
| 77 |
+
Tensor<AccDataType> save_inv_std({M});
|
| 78 |
+
|
| 79 |
+
auto ref_gemm = ReferenceGemm{};
|
| 80 |
+
auto ref_gemm_invoker = ref_gemm.MakeInvoker();
|
| 81 |
+
|
| 82 |
+
auto ref_gemm_argument =
|
| 83 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});
|
| 84 |
+
|
| 85 |
+
ref_gemm_invoker.Run(ref_gemm_argument);
|
| 86 |
+
|
| 87 |
+
for(int n = 0; n < N; ++n)
|
| 88 |
+
{
|
| 89 |
+
for(int m = 0; m < M; ++m)
|
| 90 |
+
{
|
| 91 |
+
AccDataType e = static_cast<AccDataType>(e_m_n(m, n));
|
| 92 |
+
AccDataType d0 = static_cast<AccDataType>(d0_m_n(m, n));
|
| 93 |
+
AccDataType d1 = static_cast<AccDataType>(d1_m_n(m, n));
|
| 94 |
+
cde_element_op(e, c_m_n(m, n), d0, d1);
|
| 95 |
+
e_m_n(m, n) = static_cast<EMeanVarDataType>(e);
|
| 96 |
+
}
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
ReferenceLayernorm ref_layernorm;
|
| 100 |
+
auto ref_layernorm_invoker = ref_layernorm.MakeInvoker();
|
| 101 |
+
|
| 102 |
+
auto ref_layernorm_argument = ref_layernorm.MakeArgument(
|
| 103 |
+
e_m_n, gamma_n, beta_n, h_m_n, save_mean, save_inv_std, h_element_op, {M, N}, {1}, epsilon);
|
| 104 |
+
ref_layernorm_invoker.Run(ref_layernorm_argument);
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
template <typename ADataType,
|
| 108 |
+
typename BDataType,
|
| 109 |
+
typename AccDataType,
|
| 110 |
+
typename D0DataType,
|
| 111 |
+
typename D1DataType,
|
| 112 |
+
typename EMeanVarDataType,
|
| 113 |
+
typename GammaDataType,
|
| 114 |
+
typename BetaDataType,
|
| 115 |
+
typename HDataType,
|
| 116 |
+
typename ALayout,
|
| 117 |
+
typename BLayout,
|
| 118 |
+
typename D0Layout,
|
| 119 |
+
typename D1Layout,
|
| 120 |
+
typename HLayout>
|
| 121 |
+
bool profile_gemm_add_relu_add_layernorm_impl(int do_verification,
|
| 122 |
+
int init_method,
|
| 123 |
+
bool /*do_log*/,
|
| 124 |
+
bool time_kernel,
|
| 125 |
+
int M,
|
| 126 |
+
int N,
|
| 127 |
+
int K,
|
| 128 |
+
int StrideA,
|
| 129 |
+
int StrideB,
|
| 130 |
+
int StrideD0,
|
| 131 |
+
int StrideD1,
|
| 132 |
+
int StrideH,
|
| 133 |
+
AccDataType epsilon = 1e-5)
|
| 134 |
+
{
|
| 135 |
+
auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
|
| 136 |
+
return HostTensorDescriptor({len}, {stride});
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
auto f_host_tensor_descriptor2d =
|
| 140 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 141 |
+
using namespace ck::literals;
|
| 142 |
+
|
| 143 |
+
if constexpr(std::is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 144 |
+
{
|
| 145 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 146 |
+
}
|
| 147 |
+
else
|
| 148 |
+
{
|
| 149 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 150 |
+
}
|
| 151 |
+
};
|
| 152 |
+
|
| 153 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor2d(M, K, StrideA, ALayout{}));
|
| 154 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor2d(K, N, StrideB, BLayout{}));
|
| 155 |
+
Tensor<D1DataType> d0_m_n(f_host_tensor_descriptor2d(M, N, StrideD0, D0Layout{}));
|
| 156 |
+
Tensor<D1DataType> d1_m_n(f_host_tensor_descriptor2d(M, N, StrideD1, D1Layout{}));
|
| 157 |
+
Tensor<GammaDataType> gamma_n(f_host_tensor_descriptor1d(N, 1));
|
| 158 |
+
Tensor<BetaDataType> beta_n(f_host_tensor_descriptor1d(N, 1));
|
| 159 |
+
Tensor<HDataType> h_m_n(f_host_tensor_descriptor2d(M, N, StrideH, HLayout{}));
|
| 160 |
+
Tensor<HDataType> h_m_n_host(f_host_tensor_descriptor2d(M, N, StrideH, HLayout{}));
|
| 161 |
+
|
| 162 |
+
switch(init_method)
|
| 163 |
+
{
|
| 164 |
+
case 0: break;
|
| 165 |
+
case 1:
|
| 166 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{-1, 1});
|
| 167 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-1, 1});
|
| 168 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{-1, 1});
|
| 169 |
+
d1_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{-1, 1});
|
| 170 |
+
gamma_n.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-1, 1});
|
| 171 |
+
beta_n.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-1, 1});
|
| 172 |
+
break;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 176 |
+
using AddReluAdd = ck::tensor_operation::element_wise::AddReluAdd;
|
| 177 |
+
|
| 178 |
+
using AElementOp = PassThrough;
|
| 179 |
+
using BElementOp = PassThrough;
|
| 180 |
+
using CDEElementOp = AddReluAdd;
|
| 181 |
+
using HElementOp = PassThrough;
|
| 182 |
+
|
| 183 |
+
const auto a_element_op = AElementOp{};
|
| 184 |
+
const auto b_element_op = BElementOp{};
|
| 185 |
+
const auto cde_element_op = CDEElementOp{};
|
| 186 |
+
const auto h_element_op = HElementOp{};
|
| 187 |
+
|
| 188 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmMultipleDLayernorm<
|
| 189 |
+
ALayout,
|
| 190 |
+
BLayout,
|
| 191 |
+
ck::Tuple<D0Layout, D1Layout>,
|
| 192 |
+
HLayout,
|
| 193 |
+
ADataType,
|
| 194 |
+
BDataType,
|
| 195 |
+
ck::Tuple<D0DataType, D1DataType>,
|
| 196 |
+
GammaDataType,
|
| 197 |
+
BetaDataType,
|
| 198 |
+
HDataType,
|
| 199 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 200 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 201 |
+
ck::tensor_operation::element_wise::AddReluAdd,
|
| 202 |
+
ck::tensor_operation::element_wise::PassThrough>;
|
| 203 |
+
|
| 204 |
+
// get device op instances
|
| 205 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 206 |
+
DeviceOp>::GetInstances();
|
| 207 |
+
|
| 208 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 209 |
+
|
| 210 |
+
// run reference
|
| 211 |
+
if(do_verification)
|
| 212 |
+
{
|
| 213 |
+
host_gemm_layernorm<ADataType,
|
| 214 |
+
BDataType,
|
| 215 |
+
AccDataType,
|
| 216 |
+
D0DataType,
|
| 217 |
+
D1DataType,
|
| 218 |
+
EMeanVarDataType,
|
| 219 |
+
GammaDataType,
|
| 220 |
+
BetaDataType,
|
| 221 |
+
HDataType>(h_m_n_host,
|
| 222 |
+
a_m_k,
|
| 223 |
+
b_k_n,
|
| 224 |
+
d0_m_n,
|
| 225 |
+
d1_m_n,
|
| 226 |
+
gamma_n,
|
| 227 |
+
beta_n,
|
| 228 |
+
a_element_op,
|
| 229 |
+
b_element_op,
|
| 230 |
+
cde_element_op,
|
| 231 |
+
h_element_op,
|
| 232 |
+
M,
|
| 233 |
+
N,
|
| 234 |
+
epsilon);
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 238 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 239 |
+
DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 240 |
+
DeviceMem d1_m_n_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpaceSize());
|
| 241 |
+
DeviceMem gamma_device_buf(sizeof(GammaDataType) * gamma_n.mDesc.GetElementSpaceSize());
|
| 242 |
+
DeviceMem beta_device_buf(sizeof(BetaDataType) * beta_n.mDesc.GetElementSpaceSize());
|
| 243 |
+
DeviceMem h_device_buf(sizeof(HDataType) * h_m_n.mDesc.GetElementSpaceSize());
|
| 244 |
+
|
| 245 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 246 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 247 |
+
d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
|
| 248 |
+
d1_m_n_device_buf.ToDevice(d1_m_n.mData.data());
|
| 249 |
+
gamma_device_buf.ToDevice(gamma_n.mData.data());
|
| 250 |
+
beta_device_buf.ToDevice(beta_n.mData.data());
|
| 251 |
+
|
| 252 |
+
std::string best_op_name;
|
| 253 |
+
float best_ave_time = std::numeric_limits<float>::max();
|
| 254 |
+
float best_gb_per_sec = 0;
|
| 255 |
+
|
| 256 |
+
bool pass = true;
|
| 257 |
+
int num_kernel = 0;
|
| 258 |
+
|
| 259 |
+
// profile device operation instances
|
| 260 |
+
for(auto& op_ptr : op_ptrs)
|
| 261 |
+
{
|
| 262 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 263 |
+
a_device_buf.GetDeviceBuffer(),
|
| 264 |
+
b_device_buf.GetDeviceBuffer(),
|
| 265 |
+
{d0_m_n_device_buf.GetDeviceBuffer(), d1_m_n_device_buf.GetDeviceBuffer()},
|
| 266 |
+
gamma_device_buf.GetDeviceBuffer(),
|
| 267 |
+
beta_device_buf.GetDeviceBuffer(),
|
| 268 |
+
h_device_buf.GetDeviceBuffer(),
|
| 269 |
+
M,
|
| 270 |
+
N,
|
| 271 |
+
K,
|
| 272 |
+
StrideA,
|
| 273 |
+
StrideB,
|
| 274 |
+
{StrideD0, StrideD1},
|
| 275 |
+
StrideH,
|
| 276 |
+
epsilon,
|
| 277 |
+
a_element_op,
|
| 278 |
+
b_element_op,
|
| 279 |
+
cde_element_op,
|
| 280 |
+
h_element_op);
|
| 281 |
+
|
| 282 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 283 |
+
|
| 284 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 285 |
+
|
| 286 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 287 |
+
{
|
| 288 |
+
++num_kernel;
|
| 289 |
+
|
| 290 |
+
size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
|
| 291 |
+
DeviceMem workspace_dev(workspace_sz);
|
| 292 |
+
op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());
|
| 293 |
+
|
| 294 |
+
// re-init E to zero before profiling a kernel
|
| 295 |
+
h_device_buf.SetZero();
|
| 296 |
+
|
| 297 |
+
float ave_time =
|
| 298 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 299 |
+
|
| 300 |
+
std::size_t num_byte =
|
| 301 |
+
sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 302 |
+
(sizeof(D0DataType) + sizeof(D1DataType) + sizeof(HDataType)) * M * N +
|
| 303 |
+
(sizeof(GammaDataType) + sizeof(BetaDataType)) * N;
|
| 304 |
+
|
| 305 |
+
float gb_per_sec = num_byte / 1.E6 / ave_time;
|
| 306 |
+
|
| 307 |
+
if(time_kernel)
|
| 308 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << gb_per_sec
|
| 309 |
+
<< " GB/s, " << op_name << std::endl;
|
| 310 |
+
|
| 311 |
+
if(ave_time < best_ave_time)
|
| 312 |
+
{
|
| 313 |
+
best_op_name = op_name;
|
| 314 |
+
best_ave_time = ave_time;
|
| 315 |
+
best_gb_per_sec = gb_per_sec;
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
if(do_verification)
|
| 319 |
+
{
|
| 320 |
+
h_device_buf.FromDevice(h_m_n.mData.data());
|
| 321 |
+
|
| 322 |
+
pass = pass && ck::utils::check_err(
|
| 323 |
+
h_m_n, h_m_n_host, "Error: Incorrect results h_m_n", 1e-2, 1e-2);
|
| 324 |
+
}
|
| 325 |
+
}
|
| 326 |
+
else
|
| 327 |
+
{
|
| 328 |
+
if(time_kernel)
|
| 329 |
+
std::cout << op_name << " does not support this problem" << std::endl;
|
| 330 |
+
}
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
if(num_kernel == 0)
|
| 334 |
+
{
|
| 335 |
+
std::cout << "Error: No kernel is applicable" << std::endl;
|
| 336 |
+
pass = false;
|
| 337 |
+
}
|
| 338 |
+
else
|
| 339 |
+
{
|
| 340 |
+
if(time_kernel)
|
| 341 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_gb_per_sec << " GB/s, "
|
| 342 |
+
<< best_op_name << std::endl;
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
return pass;
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
} // namespace profiler
|
| 349 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_add_relu_impl.hpp
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_add_relu.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 21 |
+
|
| 22 |
+
namespace ck {
|
| 23 |
+
namespace profiler {
|
| 24 |
+
|
| 25 |
+
template <typename ADataType,
|
| 26 |
+
typename BDataType,
|
| 27 |
+
typename AccDataType,
|
| 28 |
+
typename D0DataType,
|
| 29 |
+
typename EDataType,
|
| 30 |
+
typename ALayout,
|
| 31 |
+
typename BLayout,
|
| 32 |
+
typename D0Layout,
|
| 33 |
+
typename ELayout>
|
| 34 |
+
bool profile_gemm_add_relu_impl(int do_verification,
|
| 35 |
+
int init_method,
|
| 36 |
+
bool /*do_log*/,
|
| 37 |
+
bool time_kernel,
|
| 38 |
+
int M,
|
| 39 |
+
int N,
|
| 40 |
+
int K,
|
| 41 |
+
int StrideA,
|
| 42 |
+
int StrideB,
|
| 43 |
+
int StrideD0,
|
| 44 |
+
int StrideE)
|
| 45 |
+
{
|
| 46 |
+
auto f_host_tensor_descriptor =
|
| 47 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 48 |
+
using namespace ck::literals;
|
| 49 |
+
|
| 50 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 51 |
+
{
|
| 52 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 53 |
+
}
|
| 54 |
+
else
|
| 55 |
+
{
|
| 56 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 57 |
+
}
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 61 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 62 |
+
Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor(M, N, StrideD0, D0Layout{}));
|
| 63 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 64 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 65 |
+
|
| 66 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 67 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 68 |
+
std::cout << "d0_m_n: " << d0_m_n.mDesc << std::endl;
|
| 69 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 70 |
+
|
| 71 |
+
switch(init_method)
|
| 72 |
+
{
|
| 73 |
+
case 0: break;
|
| 74 |
+
case 1:
|
| 75 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 76 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
|
| 77 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
|
| 78 |
+
break;
|
| 79 |
+
default:
|
| 80 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 81 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 82 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 86 |
+
using AddRelu = ck::tensor_operation::element_wise::AddRelu;
|
| 87 |
+
|
| 88 |
+
using AElementOp = PassThrough;
|
| 89 |
+
using BElementOp = PassThrough;
|
| 90 |
+
using CDEElementOp = AddRelu;
|
| 91 |
+
|
| 92 |
+
const auto a_element_op = AElementOp{};
|
| 93 |
+
const auto b_element_op = BElementOp{};
|
| 94 |
+
const auto cde_element_op = CDEElementOp{};
|
| 95 |
+
|
| 96 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmMultipleD<
|
| 97 |
+
ALayout,
|
| 98 |
+
BLayout,
|
| 99 |
+
ck::Tuple<D0Layout>,
|
| 100 |
+
ELayout,
|
| 101 |
+
ADataType,
|
| 102 |
+
BDataType,
|
| 103 |
+
ck::Tuple<D0DataType>,
|
| 104 |
+
EDataType,
|
| 105 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 106 |
+
ck::tensor_operation::element_wise::PassThrough,
|
| 107 |
+
ck::tensor_operation::element_wise::AddRelu>;
|
| 108 |
+
|
| 109 |
+
// get device op instances
|
| 110 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 111 |
+
DeviceOp>::GetInstances();
|
| 112 |
+
|
| 113 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 114 |
+
|
| 115 |
+
// run reference
|
| 116 |
+
if(do_verification)
|
| 117 |
+
{
|
| 118 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 119 |
+
|
| 120 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 121 |
+
BDataType,
|
| 122 |
+
AccDataType,
|
| 123 |
+
AccDataType,
|
| 124 |
+
AElementOp,
|
| 125 |
+
BElementOp,
|
| 126 |
+
PassThrough>;
|
| 127 |
+
|
| 128 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 129 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 130 |
+
|
| 131 |
+
auto ref_argument =
|
| 132 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});
|
| 133 |
+
|
| 134 |
+
ref_invoker.Run(ref_argument);
|
| 135 |
+
|
| 136 |
+
for(int m = 0; m < M; ++m)
|
| 137 |
+
{
|
| 138 |
+
for(int n = 0; n < N; ++n)
|
| 139 |
+
{
|
| 140 |
+
cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d0_m_n(m, n));
|
| 141 |
+
}
|
| 142 |
+
}
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 146 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 147 |
+
DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 148 |
+
DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 149 |
+
|
| 150 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 151 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 152 |
+
d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
|
| 153 |
+
|
| 154 |
+
std::string best_op_name;
|
| 155 |
+
float best_ave_time = 0;
|
| 156 |
+
float best_tflops = 0;
|
| 157 |
+
float best_gb_per_sec = 0;
|
| 158 |
+
|
| 159 |
+
bool pass = true;
|
| 160 |
+
|
| 161 |
+
// profile device operation instances
|
| 162 |
+
for(auto& op_ptr : op_ptrs)
|
| 163 |
+
{
|
| 164 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 165 |
+
a_device_buf.GetDeviceBuffer(),
|
| 166 |
+
b_device_buf.GetDeviceBuffer(),
|
| 167 |
+
std::array<const void*, 1>{d0_m_n_device_buf.GetDeviceBuffer()},
|
| 168 |
+
e_device_buf.GetDeviceBuffer(),
|
| 169 |
+
M,
|
| 170 |
+
N,
|
| 171 |
+
K,
|
| 172 |
+
StrideA,
|
| 173 |
+
StrideB,
|
| 174 |
+
std::array<ck::index_t, 1>{StrideD0},
|
| 175 |
+
StrideE,
|
| 176 |
+
a_element_op,
|
| 177 |
+
b_element_op,
|
| 178 |
+
cde_element_op);
|
| 179 |
+
|
| 180 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 181 |
+
|
| 182 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 183 |
+
|
| 184 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 185 |
+
{
|
| 186 |
+
// re-init E to zero before profiling a kernel
|
| 187 |
+
e_device_buf.SetZero();
|
| 188 |
+
|
| 189 |
+
float ave_time =
|
| 190 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 191 |
+
|
| 192 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 193 |
+
|
| 194 |
+
std::size_t num_btype =
|
| 195 |
+
sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;
|
| 196 |
+
|
| 197 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 198 |
+
|
| 199 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 200 |
+
|
| 201 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 202 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 203 |
+
|
| 204 |
+
if(tflops > best_tflops)
|
| 205 |
+
{
|
| 206 |
+
best_op_name = op_name;
|
| 207 |
+
best_tflops = tflops;
|
| 208 |
+
best_ave_time = ave_time;
|
| 209 |
+
best_gb_per_sec = gb_per_sec;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
if(do_verification)
|
| 213 |
+
{
|
| 214 |
+
e_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 215 |
+
|
| 216 |
+
pass = pass && ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 217 |
+
}
|
| 218 |
+
}
|
| 219 |
+
else
|
| 220 |
+
{
|
| 221 |
+
std::cout << op_name << " does not support this problem" << std::endl;
|
| 222 |
+
}
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 226 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 227 |
+
|
| 228 |
+
return pass;
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
} // namespace profiler
|
| 232 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_bias_add_reduce_impl.hpp
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include "ck/ck.hpp"
|
| 7 |
+
#include "ck/utility/reduction_operator.hpp"
|
| 8 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_reduce.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 11 |
+
|
| 12 |
+
#include "ck/library/utility/check_err.hpp"
|
| 13 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 14 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 15 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 16 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 17 |
+
#include "ck/library/utility/literals.hpp"
|
| 18 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 19 |
+
|
| 20 |
+
namespace ck {
|
| 21 |
+
namespace tensor_operation {
|
| 22 |
+
namespace device {
|
| 23 |
+
namespace instance {
|
| 24 |
+
|
| 25 |
+
using F32 = float;
|
| 26 |
+
using F16 = ck::half_t;
|
| 27 |
+
using ReducePtrsGlobal = ck::Tuple<F32*, F32*>;
|
| 28 |
+
using Div = ck::tensor_operation::element_wise::UnaryDivide;
|
| 29 |
+
using Identity = ck::tensor_operation::element_wise::PassThrough;
|
| 30 |
+
using Square = ck::tensor_operation::element_wise::UnarySquare;
|
| 31 |
+
using ReduceInElementOps = ck::Tuple<Identity, Square>;
|
| 32 |
+
using ReduceOutElementOps = ck::Tuple<Div, Div>;
|
| 33 |
+
|
| 34 |
+
using DeviceGemmBiasAddReduceNoOpPtr =
|
| 35 |
+
ck::tensor_operation::device::DeviceGemmReducePtr<1, ReducePtrsGlobal::Size()>;
|
| 36 |
+
|
| 37 |
+
void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_kn_mn_instances(
|
| 38 |
+
std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);
|
| 39 |
+
|
| 40 |
+
void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_nk_mn_instances(
|
| 41 |
+
std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);
|
| 42 |
+
|
| 43 |
+
void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_kn_mn_instances(
|
| 44 |
+
std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);
|
| 45 |
+
|
| 46 |
+
void add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_nk_mn_instances(
|
| 47 |
+
std::vector<DeviceGemmBiasAddReduceNoOpPtr>&);
|
| 48 |
+
|
| 49 |
+
} // namespace instance
|
| 50 |
+
} // namespace device
|
| 51 |
+
} // namespace tensor_operation
|
| 52 |
+
} // namespace ck
|
| 53 |
+
|
| 54 |
+
namespace ck {
|
| 55 |
+
namespace profiler {
|
| 56 |
+
|
| 57 |
+
template <typename ADataType,
|
| 58 |
+
typename BDataType,
|
| 59 |
+
typename CDataType,
|
| 60 |
+
typename BiasDataType,
|
| 61 |
+
typename D0DataType,
|
| 62 |
+
typename ReduceDataType,
|
| 63 |
+
typename ALayout,
|
| 64 |
+
typename BLayout,
|
| 65 |
+
typename CLayout>
|
| 66 |
+
void profile_gemm_bias_add_reduce_impl(int do_verification,
|
| 67 |
+
int init_method,
|
| 68 |
+
bool do_log,
|
| 69 |
+
bool time_kernel,
|
| 70 |
+
int M,
|
| 71 |
+
int N,
|
| 72 |
+
int K,
|
| 73 |
+
int StrideA,
|
| 74 |
+
int StrideB,
|
| 75 |
+
int StrideC,
|
| 76 |
+
int StrideD0)
|
| 77 |
+
{
|
| 78 |
+
auto f_host_tensor_descriptor1d = [](std::size_t len, std::size_t stride) {
|
| 79 |
+
return HostTensorDescriptor({len}, {stride});
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
auto f_host_tensor_descriptor2d =
|
| 83 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 84 |
+
using namespace ck::literals;
|
| 85 |
+
|
| 86 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 87 |
+
{
|
| 88 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 89 |
+
}
|
| 90 |
+
else
|
| 91 |
+
{
|
| 92 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 93 |
+
}
|
| 94 |
+
};
|
| 95 |
+
|
| 96 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor2d(M, K, StrideA, ALayout{}));
|
| 97 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor2d(K, N, StrideB, BLayout{}));
|
| 98 |
+
|
| 99 |
+
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor2d(M, N, StrideC, CLayout{}));
|
| 100 |
+
Tensor<BiasDataType> bias_n(f_host_tensor_descriptor1d(N, 1));
|
| 101 |
+
Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor2d(M, N, StrideC, CLayout{}));
|
| 102 |
+
Tensor<ReduceDataType> reduce0_m_host_result({M});
|
| 103 |
+
Tensor<ReduceDataType> reduce1_m_host_result({M});
|
| 104 |
+
|
| 105 |
+
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor2d(M, N, StrideC, CLayout{}));
|
| 106 |
+
Tensor<ReduceDataType> reduce0_m_device_result({M});
|
| 107 |
+
Tensor<ReduceDataType> reduce1_m_device_result({M});
|
| 108 |
+
|
| 109 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 110 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 111 |
+
std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
|
| 112 |
+
std::cout << "reduce0_m: " << reduce0_m_host_result.mDesc << std::endl;
|
| 113 |
+
std::cout << "reduce1_m: " << reduce1_m_host_result.mDesc << std::endl;
|
| 114 |
+
|
| 115 |
+
std::size_t num_thread = 1;
|
| 116 |
+
switch(init_method)
|
| 117 |
+
{
|
| 118 |
+
case 0: break;
|
| 119 |
+
case 1:
|
| 120 |
+
std::srand(0);
|
| 121 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
|
| 122 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
|
| 123 |
+
bias_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
|
| 124 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
|
| 125 |
+
break;
|
| 126 |
+
default:
|
| 127 |
+
std::srand(0);
|
| 128 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
|
| 129 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
|
| 130 |
+
bias_n.GenerateTensorValue(GeneratorTensor_3<ADataType>{-0.5, 0.5}, num_thread);
|
| 131 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 135 |
+
using AElementOp = PassThrough;
|
| 136 |
+
using BElementOp = PassThrough;
|
| 137 |
+
using CElementOp = PassThrough;
|
| 138 |
+
using D0ElementOp = PassThrough;
|
| 139 |
+
using ReduceOp0 = ck::reduce::Add;
|
| 140 |
+
using ReduceOp1 = ck::reduce::Add;
|
| 141 |
+
using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
|
| 142 |
+
using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 143 |
+
using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
|
| 144 |
+
|
| 145 |
+
auto a_element_op = AElementOp{};
|
| 146 |
+
auto b_element_op = BElementOp{};
|
| 147 |
+
auto c_element_op = CElementOp{};
|
| 148 |
+
std::array<void*, 3> gemm_element_ops = {&a_element_op, &b_element_op, &c_element_op};
|
| 149 |
+
|
| 150 |
+
auto d0_element_op = D0ElementOp{};
|
| 151 |
+
const auto reduce0_op = ReduceOp0{};
|
| 152 |
+
const auto reduce1_op = ReduceOp1{};
|
| 153 |
+
|
| 154 |
+
auto passthrough = UnaryIdenticElementOp{};
|
| 155 |
+
auto square = UnarySquareElementOp{};
|
| 156 |
+
auto div = UnaryDivElementOp{N};
|
| 157 |
+
std::array<void*, 2> reduce_in_element_ops = {&passthrough, &square};
|
| 158 |
+
std::array<void*, 2> reduce_out_element_ops = {&div, &div};
|
| 159 |
+
|
| 160 |
+
if(do_verification)
|
| 161 |
+
{
|
| 162 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 163 |
+
BDataType,
|
| 164 |
+
CDataType,
|
| 165 |
+
ReduceDataType,
|
| 166 |
+
AElementOp,
|
| 167 |
+
BElementOp,
|
| 168 |
+
CElementOp>;
|
| 169 |
+
|
| 170 |
+
using ReduceAccDataType = ReduceDataType;
|
| 171 |
+
|
| 172 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 173 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 174 |
+
|
| 175 |
+
auto ref_argument = ref_gemm.MakeArgument(
|
| 176 |
+
a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, PassThrough{});
|
| 177 |
+
|
| 178 |
+
ref_invoker.Run(ref_argument);
|
| 179 |
+
|
| 180 |
+
for(int m = 0; m < M; ++m)
|
| 181 |
+
for(int n = 0; n < N; ++n)
|
| 182 |
+
{
|
| 183 |
+
ReduceAccDataType acc = static_cast<ReduceAccDataType>(c_m_n_host_result(m, n)) +
|
| 184 |
+
static_cast<ReduceAccDataType>(bias_n(n));
|
| 185 |
+
|
| 186 |
+
ReduceAccDataType d0 = static_cast<ReduceAccDataType>(d0_m_n(m, n));
|
| 187 |
+
c_element_op(acc, acc);
|
| 188 |
+
d0_element_op(d0, d0);
|
| 189 |
+
acc += d0;
|
| 190 |
+
c_m_n_host_result(m, n) = static_cast<CDataType>(acc);
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
for(int m = 0; m < M; ++m)
|
| 194 |
+
{
|
| 195 |
+
auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
|
| 196 |
+
auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();
|
| 197 |
+
|
| 198 |
+
for(int n = 0; n < N; ++n)
|
| 199 |
+
{
|
| 200 |
+
ReduceAccDataType d0_val =
|
| 201 |
+
ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
|
| 202 |
+
ReduceAccDataType d1_val;
|
| 203 |
+
|
| 204 |
+
square(d1_val, d0_val);
|
| 205 |
+
reduce0_op(reduce0_acc, d0_val);
|
| 206 |
+
reduce1_op(reduce1_acc, d1_val);
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
div(reduce0_acc, reduce0_acc);
|
| 210 |
+
div(reduce1_acc, reduce1_acc);
|
| 211 |
+
reduce0_m_host_result(m) = ck::type_convert<ReduceDataType>(reduce0_acc);
|
| 212 |
+
reduce1_m_host_result(m) = ck::type_convert<ReduceDataType>(reduce1_acc);
|
| 213 |
+
}
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 217 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 218 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 219 |
+
DeviceMem bias_device_buf(sizeof(BiasDataType) * bias_n.mDesc.GetElementSpaceSize());
|
| 220 |
+
DeviceMem d0_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 221 |
+
DeviceMem reduce0_device_buf(sizeof(ReduceDataType) *
|
| 222 |
+
reduce0_m_device_result.mDesc.GetElementSpaceSize());
|
| 223 |
+
DeviceMem reduce1_device_buf(sizeof(ReduceDataType) *
|
| 224 |
+
reduce1_m_device_result.mDesc.GetElementSpaceSize());
|
| 225 |
+
|
| 226 |
+
std::array<void*, 2> p_reduces = {reduce0_device_buf.GetDeviceBuffer(),
|
| 227 |
+
reduce1_device_buf.GetDeviceBuffer()};
|
| 228 |
+
|
| 229 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 230 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 231 |
+
bias_device_buf.ToDevice(bias_n.mData.data());
|
| 232 |
+
d0_device_buf.ToDevice(d0_m_n.mData.data());
|
| 233 |
+
|
| 234 |
+
// add device GEMM instances
|
| 235 |
+
std::vector<ck::tensor_operation::device::instance::DeviceGemmBiasAddReduceNoOpPtr> gemm_ptrs;
|
| 236 |
+
|
| 237 |
+
if constexpr(is_same<ADataType, half_t>::value && is_same<BDataType, half_t>::value &&
|
| 238 |
+
is_same<CDataType, half_t>::value)
|
| 239 |
+
{
|
| 240 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
|
| 241 |
+
is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
|
| 242 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 243 |
+
{
|
| 244 |
+
ck::tensor_operation::device::instance::
|
| 245 |
+
add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_kn_mn_instances(
|
| 246 |
+
gemm_ptrs);
|
| 247 |
+
}
|
| 248 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
|
| 249 |
+
is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 250 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 251 |
+
{
|
| 252 |
+
ck::tensor_operation::device::instance::
|
| 253 |
+
add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_mk_nk_mn_instances(
|
| 254 |
+
gemm_ptrs);
|
| 255 |
+
}
|
| 256 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 257 |
+
is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
|
| 258 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 259 |
+
{
|
| 260 |
+
ck::tensor_operation::device::instance::
|
| 261 |
+
add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_kn_mn_instances(
|
| 262 |
+
gemm_ptrs);
|
| 263 |
+
}
|
| 264 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 265 |
+
is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 266 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 267 |
+
{
|
| 268 |
+
ck::tensor_operation::device::instance::
|
| 269 |
+
add_device_gemm_bias_add_mean_squaremean_xdl_cshuffle_f16_f16_f16_f16_f16_f32_f32_km_nk_mn_instances(
|
| 270 |
+
gemm_ptrs);
|
| 271 |
+
}
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
if(gemm_ptrs.size() <= 0)
|
| 275 |
+
{
|
| 276 |
+
throw std::runtime_error("wrong! no device GEMM instance found");
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
std::string best_gemm_name;
|
| 280 |
+
float best_ave_time = 0;
|
| 281 |
+
float best_tflops = 0;
|
| 282 |
+
float best_gb_per_sec = 0;
|
| 283 |
+
|
| 284 |
+
// profile device GEMM instances
|
| 285 |
+
for(auto& gemm_ptr : gemm_ptrs)
|
| 286 |
+
{
|
| 287 |
+
auto argument_ptr = gemm_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(),
|
| 288 |
+
b_device_buf.GetDeviceBuffer(),
|
| 289 |
+
bias_device_buf.GetDeviceBuffer(),
|
| 290 |
+
{d0_device_buf.GetDeviceBuffer()},
|
| 291 |
+
c_device_buf.GetDeviceBuffer(),
|
| 292 |
+
p_reduces,
|
| 293 |
+
M,
|
| 294 |
+
N,
|
| 295 |
+
K,
|
| 296 |
+
StrideA,
|
| 297 |
+
StrideB,
|
| 298 |
+
StrideC,
|
| 299 |
+
{StrideD0},
|
| 300 |
+
gemm_element_ops,
|
| 301 |
+
{&d0_element_op},
|
| 302 |
+
reduce_in_element_ops,
|
| 303 |
+
reduce_out_element_ops);
|
| 304 |
+
|
| 305 |
+
auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
|
| 306 |
+
|
| 307 |
+
if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 308 |
+
{
|
| 309 |
+
// init DO, D1 to 0
|
| 310 |
+
reduce0_device_buf.SetZero();
|
| 311 |
+
reduce1_device_buf.SetZero();
|
| 312 |
+
|
| 313 |
+
float ave_time =
|
| 314 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 315 |
+
|
| 316 |
+
std::string gemm_name = gemm_ptr->GetTypeString();
|
| 317 |
+
|
| 318 |
+
std::size_t flop = std::size_t(2) * M * N * K + std::size_t(2) * M * N;
|
| 319 |
+
|
| 320 |
+
std::size_t num_byte = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 321 |
+
sizeof(CDataType) * M * N + sizeof(BiasDataType) * M * N +
|
| 322 |
+
sizeof(D0DataType) * M * N + sizeof(ReduceDataType) * M +
|
| 323 |
+
sizeof(ReduceDataType) * M;
|
| 324 |
+
|
| 325 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 326 |
+
|
| 327 |
+
float gb_per_sec = num_byte / 1.E6 / ave_time;
|
| 328 |
+
|
| 329 |
+
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 330 |
+
<< " GB/s, " << gemm_name << std::endl;
|
| 331 |
+
|
| 332 |
+
if(tflops > best_tflops)
|
| 333 |
+
{
|
| 334 |
+
best_gemm_name = gemm_name;
|
| 335 |
+
best_tflops = tflops;
|
| 336 |
+
best_ave_time = ave_time;
|
| 337 |
+
best_gb_per_sec = gb_per_sec;
|
| 338 |
+
}
|
| 339 |
+
|
| 340 |
+
if(do_verification)
|
| 341 |
+
{
|
| 342 |
+
c_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
| 343 |
+
reduce0_device_buf.FromDevice(reduce0_m_device_result.mData.data());
|
| 344 |
+
reduce1_device_buf.FromDevice(reduce1_m_device_result.mData.data());
|
| 345 |
+
|
| 346 |
+
ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 347 |
+
ck::utils::check_err(reduce0_m_device_result, reduce0_m_host_result);
|
| 348 |
+
ck::utils::check_err(reduce1_m_device_result, reduce1_m_host_result);
|
| 349 |
+
|
| 350 |
+
if(do_log)
|
| 351 |
+
{
|
| 352 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",") << std::endl;
|
| 353 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
|
| 354 |
+
LogRangeAsType<float>(std::cout << "c_host: ", c_m_n_host_result.mData, ",")
|
| 355 |
+
<< std::endl;
|
| 356 |
+
LogRangeAsType<float>(std::cout << "c_device: ", c_m_n_device_result.mData, ",")
|
| 357 |
+
<< std::endl;
|
| 358 |
+
LogRangeAsType<float>(
|
| 359 |
+
std::cout << "d0_host: ", reduce0_m_host_result.mData, ",")
|
| 360 |
+
<< std::endl;
|
| 361 |
+
LogRangeAsType<float>(
|
| 362 |
+
std::cout << "d0_device: ", reduce0_m_device_result.mData, ",")
|
| 363 |
+
<< std::endl;
|
| 364 |
+
LogRangeAsType<float>(
|
| 365 |
+
std::cout << "d1_host: ", reduce1_m_host_result.mData, ",")
|
| 366 |
+
<< std::endl;
|
| 367 |
+
LogRangeAsType<float>(
|
| 368 |
+
std::cout << "d1_device: ", reduce1_m_device_result.mData, ",")
|
| 369 |
+
<< std::endl;
|
| 370 |
+
}
|
| 371 |
+
}
|
| 372 |
+
}
|
| 373 |
+
else
|
| 374 |
+
{
|
| 375 |
+
std::cout << "does not support this GEMM problem" << std::endl;
|
| 376 |
+
}
|
| 377 |
+
}
|
| 378 |
+
|
| 379 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 380 |
+
<< best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl;
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
} // namespace profiler
|
| 384 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_blockscale_wp_impl.hpp
ADDED
|
@@ -0,0 +1,415 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle_v3_blockscale_bpreshuffle.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_blockscale_wp.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
// Reorders a row-major [N, K] weight buffer into the pre-shuffled layout
// expected by the B-preshuffle GEMM kernels:
//
//   K -> (K0, KLane, KPack)  and  N -> (N0, NLane)
//   [N, K] -> [N0, K0, KLane, NLane, KPack]
//
// where KPack is fixed at 16, NLane == NXdl (the MFMA N dimension), and
// KLane == 64 / NLane (one wavefront of 64 lanes). K is assumed to be a
// multiple of KLane * KPack and N a multiple of NXdl.
template <typename InOutDataType>
void preShuffleBuffer(const InOutDataType* src, InOutDataType* dst, int N, int K, int NXdl)
{
    constexpr int kpack = 16;
    const int n_lane    = NXdl;
    const int k_lane    = 64 / n_lane;
    const int k_block   = k_lane * kpack; // elements of K covered by one (KLane, KPack) tile
    const int k0_extent = K / k_block;

    // Strides of the [N0, K0, KLane, NLane, KPack] destination layout.
    const int stride_n1 = kpack;
    const int stride_k1 = kpack * n_lane;
    const int stride_k0 = stride_k1 * k_lane;
    const int stride_n0 = stride_k0 * k0_extent;

    for(int n = 0; n < N; ++n)
    {
        const int n0 = n / n_lane;
        const int n1 = n % n_lane;

        for(int k = 0; k < K; ++k)
        {
            const int k0    = k / k_block;
            const int k_rem = k % k_block;
            const int k1    = k_rem / kpack;
            const int k2    = k_rem % kpack;

            dst[n0 * stride_n0 + k0 * stride_k0 + k1 * stride_k1 + n1 * stride_n1 + k2] =
                src[n * K + k];
        }
    }
}
|
| 58 |
+
|
| 59 |
+
template <typename A0DataType,
|
| 60 |
+
typename A1DataType,
|
| 61 |
+
typename B0DataType,
|
| 62 |
+
typename B1DataType,
|
| 63 |
+
typename ComputeDataType,
|
| 64 |
+
typename AccDataType,
|
| 65 |
+
typename EDataType,
|
| 66 |
+
index_t ScaleBlockM,
|
| 67 |
+
index_t ScaleBlockN,
|
| 68 |
+
index_t ScaleBlockK,
|
| 69 |
+
typename ALayout,
|
| 70 |
+
typename BLayout,
|
| 71 |
+
typename ELayout>
|
| 72 |
+
bool profile_gemm_blockscale_weighpreshuffle_impl(int do_verification,
|
| 73 |
+
int init_method,
|
| 74 |
+
bool do_log,
|
| 75 |
+
bool time_kernel,
|
| 76 |
+
int M,
|
| 77 |
+
int N,
|
| 78 |
+
int K,
|
| 79 |
+
int StrideA,
|
| 80 |
+
int StrideB,
|
| 81 |
+
int StrideE,
|
| 82 |
+
int n_warmup,
|
| 83 |
+
int n_iter,
|
| 84 |
+
uint64_t rotating = 0)
|
| 85 |
+
{
|
| 86 |
+
bool pass = true;
|
| 87 |
+
|
| 88 |
+
auto f_host_tensor_descriptor =
|
| 89 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 90 |
+
using namespace ck::literals;
|
| 91 |
+
|
| 92 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 93 |
+
{
|
| 94 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 95 |
+
}
|
| 96 |
+
else
|
| 97 |
+
{
|
| 98 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 99 |
+
}
|
| 100 |
+
};
|
| 101 |
+
|
| 102 |
+
ck::index_t Scale_Stride_AM = ((M + ScaleBlockM - 1) / ScaleBlockM);
|
| 103 |
+
ck::index_t Scale_Stride_BN = ck::is_same_v<BLayout, ck::tensor_layout::gemm::ColumnMajor>
|
| 104 |
+
? ((K + ScaleBlockK - 1) / ScaleBlockK)
|
| 105 |
+
: ((N + ScaleBlockN - 1) / ScaleBlockN);
|
| 106 |
+
|
| 107 |
+
Tensor<A0DataType> a0_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 108 |
+
Tensor<A1DataType> a1_m_k(f_host_tensor_descriptor((M + ScaleBlockM - 1) / ScaleBlockM,
|
| 109 |
+
(K + ScaleBlockK - 1) / ScaleBlockK,
|
| 110 |
+
Scale_Stride_AM,
|
| 111 |
+
ck::tensor_layout::gemm::ColumnMajor{}));
|
| 112 |
+
Tensor<B0DataType> b0_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 113 |
+
Tensor<B0DataType> b_preshuffled_mfma16(
|
| 114 |
+
f_host_tensor_descriptor(K, N, StrideB, BLayout{})); // use layout only for size
|
| 115 |
+
Tensor<B0DataType> b_preshuffled_mfma32(
|
| 116 |
+
f_host_tensor_descriptor(K, N, StrideB, BLayout{})); // use layout only for size
|
| 117 |
+
Tensor<B1DataType> b1_k_n(f_host_tensor_descriptor((K + ScaleBlockK - 1) / ScaleBlockK,
|
| 118 |
+
(N + ScaleBlockN - 1) / ScaleBlockN,
|
| 119 |
+
Scale_Stride_BN,
|
| 120 |
+
BLayout{}));
|
| 121 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 122 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 123 |
+
|
| 124 |
+
int total_gemm_needed =
|
| 125 |
+
a0_m_k.GetElementSpaceSizeInBytes() + b0_k_n.GetElementSpaceSizeInBytes() +
|
| 126 |
+
a1_m_k.GetElementSpaceSizeInBytes() + b1_k_n.GetElementSpaceSizeInBytes();
|
| 127 |
+
int rotating_count = std::max(
|
| 128 |
+
1,
|
| 129 |
+
std::min(n_iter,
|
| 130 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 131 |
+
|
| 132 |
+
std::cout << "a0_m_k: " << a0_m_k.mDesc << std::endl;
|
| 133 |
+
std::cout << "a1_m_k: " << a1_m_k.mDesc << std::endl;
|
| 134 |
+
std::cout << "b0_k_n: " << b0_k_n.mDesc << std::endl;
|
| 135 |
+
std::cout << "b1_k_n: " << b1_k_n.mDesc << std::endl;
|
| 136 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 137 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 138 |
+
|
| 139 |
+
switch(init_method)
|
| 140 |
+
{
|
| 141 |
+
case 0: break;
|
| 142 |
+
case 1:
|
| 143 |
+
a0_m_k.GenerateTensorValue(GeneratorTensor_2<A0DataType>{-2, 2});
|
| 144 |
+
b0_k_n.GenerateTensorValue(GeneratorTensor_2<B0DataType>{-2, 2});
|
| 145 |
+
a1_m_k.GenerateTensorValue(GeneratorTensor_3<A1DataType>{0, 1.0});
|
| 146 |
+
b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 1.0});
|
| 147 |
+
break;
|
| 148 |
+
default:
|
| 149 |
+
a0_m_k.GenerateTensorValue(GeneratorTensor_3<A0DataType>{-0.5, 0.5});
|
| 150 |
+
b0_k_n.GenerateTensorValue(GeneratorTensor_3<B0DataType>{-0.5, 0.5});
|
| 151 |
+
a1_m_k.GenerateTensorValue(GeneratorTensor_3<A1DataType>{0, 1.0});
|
| 152 |
+
b1_k_n.GenerateTensorValue(GeneratorTensor_3<B1DataType>{0, 1.0});
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
preShuffleBuffer(b0_k_n.mData.data(), b_preshuffled_mfma16.mData.data(), N, K, 16);
|
| 156 |
+
preShuffleBuffer(b0_k_n.mData.data(), b_preshuffled_mfma32.mData.data(), N, K, 32);
|
| 157 |
+
|
| 158 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 159 |
+
|
| 160 |
+
using AElementOp = PassThrough;
|
| 161 |
+
using BElementOp = PassThrough;
|
| 162 |
+
using CElementOp = PassThrough;
|
| 163 |
+
|
| 164 |
+
const auto a_element_op = AElementOp{};
|
| 165 |
+
const auto b_element_op = BElementOp{};
|
| 166 |
+
const auto c_element_op = CElementOp{};
|
| 167 |
+
|
| 168 |
+
DeviceMem a0_device_buf(sizeof(A0DataType) * a0_m_k.mDesc.GetElementSpaceSize());
|
| 169 |
+
DeviceMem b_device_buf_mfma16(sizeof(B0DataType) * b0_k_n.mDesc.GetElementSpaceSize());
|
| 170 |
+
DeviceMem b_device_buf_mfma32(sizeof(B0DataType) * b0_k_n.mDesc.GetElementSpaceSize());
|
| 171 |
+
DeviceMem a1_device_buf(sizeof(A1DataType) * a1_m_k.mDesc.GetElementSpaceSize());
|
| 172 |
+
DeviceMem b1_device_buf(sizeof(B1DataType) * b1_k_n.mDesc.GetElementSpaceSize());
|
| 173 |
+
DeviceMem c_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 174 |
+
|
| 175 |
+
a0_device_buf.ToDevice(a0_m_k.mData.data());
|
| 176 |
+
b_device_buf_mfma16.ToDevice(b_preshuffled_mfma16.mData.data());
|
| 177 |
+
b_device_buf_mfma32.ToDevice(b_preshuffled_mfma32.mData.data());
|
| 178 |
+
a1_device_buf.ToDevice(a1_m_k.mData.data());
|
| 179 |
+
b1_device_buf.ToDevice(b1_k_n.mData.data());
|
| 180 |
+
|
| 181 |
+
using DeviceOp =
|
| 182 |
+
ck::tensor_operation::device::DeviceGemmMultipleD_BlockScale_BPreshuffle<ALayout,
|
| 183 |
+
BLayout,
|
| 184 |
+
ck::Tuple<>,
|
| 185 |
+
ELayout,
|
| 186 |
+
A0DataType,
|
| 187 |
+
A1DataType,
|
| 188 |
+
B0DataType,
|
| 189 |
+
B1DataType,
|
| 190 |
+
ck::Tuple<>,
|
| 191 |
+
EDataType,
|
| 192 |
+
ScaleBlockM,
|
| 193 |
+
ScaleBlockN,
|
| 194 |
+
ScaleBlockK,
|
| 195 |
+
AElementOp,
|
| 196 |
+
BElementOp,
|
| 197 |
+
CElementOp>;
|
| 198 |
+
|
| 199 |
+
// get device op instances
|
| 200 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 201 |
+
DeviceOp>::GetInstances();
|
| 202 |
+
|
| 203 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 204 |
+
|
| 205 |
+
// Run reference GEMM
|
| 206 |
+
if(do_verification)
|
| 207 |
+
{
|
| 208 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 209 |
+
Tensor<float> a_m_k({M, K});
|
| 210 |
+
Tensor<float> b_k_n({K, N});
|
| 211 |
+
|
| 212 |
+
for(int m = 0; m < M; m++)
|
| 213 |
+
{
|
| 214 |
+
for(int k = 0; k < K; k++)
|
| 215 |
+
{
|
| 216 |
+
a_m_k(m, k) = ck::type_convert<float>(a0_m_k(m, k)) *
|
| 217 |
+
a1_m_k(m / ScaleBlockM, k / ScaleBlockK);
|
| 218 |
+
}
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
for(int n = 0; n < N; n++)
|
| 222 |
+
{
|
| 223 |
+
for(int k = 0; k < K; k++)
|
| 224 |
+
{
|
| 225 |
+
b_k_n(k, n) = ck::type_convert<float>(b0_k_n(k, n)) *
|
| 226 |
+
b1_k_n(k / ScaleBlockK, n / ScaleBlockN);
|
| 227 |
+
}
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<float,
|
| 231 |
+
float,
|
| 232 |
+
AccDataType,
|
| 233 |
+
AccDataType,
|
| 234 |
+
AElementOp,
|
| 235 |
+
BElementOp,
|
| 236 |
+
PassThrough,
|
| 237 |
+
float>;
|
| 238 |
+
|
| 239 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 240 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 241 |
+
|
| 242 |
+
auto ref_argument =
|
| 243 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, PassThrough{}, PassThrough{}, PassThrough{});
|
| 244 |
+
|
| 245 |
+
ref_invoker.Run(ref_argument);
|
| 246 |
+
|
| 247 |
+
for(int m = 0; m < M; ++m)
|
| 248 |
+
{
|
| 249 |
+
for(int n = 0; n < N; ++n)
|
| 250 |
+
{
|
| 251 |
+
e_m_n_host_result(m, n) = ck::type_convert<EDataType>(c_m_n(m, n));
|
| 252 |
+
}
|
| 253 |
+
}
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
std::string best_op_name;
|
| 257 |
+
float best_ave_time = 0;
|
| 258 |
+
float best_tflops = 0;
|
| 259 |
+
float best_gb_per_sec = 0;
|
| 260 |
+
|
| 261 |
+
// profile device GEMM instances
|
| 262 |
+
for(auto& op_ptr : op_ptrs)
|
| 263 |
+
{
|
| 264 |
+
int NPerXdl = op_ptr->GetPreShuffleParameters();
|
| 265 |
+
|
| 266 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 267 |
+
static_cast<A0DataType*>(a0_device_buf.GetDeviceBuffer()),
|
| 268 |
+
static_cast<B0DataType*>(NPerXdl == 16 ? b_device_buf_mfma16.GetDeviceBuffer()
|
| 269 |
+
: b_device_buf_mfma32.GetDeviceBuffer()),
|
| 270 |
+
std::array<const void*, 0>{},
|
| 271 |
+
static_cast<EDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 272 |
+
M,
|
| 273 |
+
N,
|
| 274 |
+
K,
|
| 275 |
+
StrideA,
|
| 276 |
+
StrideB,
|
| 277 |
+
std::array<ck::index_t, 0>{},
|
| 278 |
+
StrideE,
|
| 279 |
+
a1_device_buf.GetDeviceBuffer(),
|
| 280 |
+
b1_device_buf.GetDeviceBuffer(),
|
| 281 |
+
a_element_op,
|
| 282 |
+
b_element_op,
|
| 283 |
+
c_element_op);
|
| 284 |
+
|
| 285 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 286 |
+
|
| 287 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 288 |
+
{
|
| 289 |
+
|
| 290 |
+
// re-init C to zero before profiling next kernel
|
| 291 |
+
c_device_buf.SetZero();
|
| 292 |
+
|
| 293 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 294 |
+
|
| 295 |
+
if(do_verification)
|
| 296 |
+
{
|
| 297 |
+
c_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 298 |
+
|
| 299 |
+
#if defined CK_ENABLE_FP8
|
| 300 |
+
// set softer tolerances for fp8
|
| 301 |
+
if constexpr(is_same_v<A0DataType, f8_t> || is_same_v<B0DataType, f8_t> ||
|
| 302 |
+
is_same_v<EDataType, f8_t>)
|
| 303 |
+
{
|
| 304 |
+
std::string msg = "Error: Incorrect results!";
|
| 305 |
+
double rtol = 5e-2;
|
| 306 |
+
double atol = 5e-2;
|
| 307 |
+
bool current_pass = ck::utils::check_err(
|
| 308 |
+
e_m_n_device_result, e_m_n_host_result, msg, rtol, atol);
|
| 309 |
+
pass = pass & current_pass;
|
| 310 |
+
if(!current_pass)
|
| 311 |
+
{
|
| 312 |
+
std::cout << op_ptr->GetTypeString() << " failed" << std::endl;
|
| 313 |
+
}
|
| 314 |
+
}
|
| 315 |
+
else
|
| 316 |
+
{
|
| 317 |
+
#endif
|
| 318 |
+
pass = pass & ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 319 |
+
if(!pass)
|
| 320 |
+
{
|
| 321 |
+
std::cout << op_ptr->GetTypeString() << " failed" << std::endl;
|
| 322 |
+
}
|
| 323 |
+
#if defined CK_ENABLE_FP8
|
| 324 |
+
}
|
| 325 |
+
#endif
|
| 326 |
+
|
| 327 |
+
if(do_log)
|
| 328 |
+
{
|
| 329 |
+
LogRangeAsType<float>(std::cout << "a : ", a0_m_k.mData, ",") << std::endl;
|
| 330 |
+
LogRangeAsType<float>(std::cout << "b: ", b0_k_n.mData, ",") << std::endl;
|
| 331 |
+
LogRangeAsType<float>(std::cout << "c_host : ", e_m_n_host_result.mData, ",")
|
| 332 |
+
<< std::endl;
|
| 333 |
+
LogRangeAsType<float>(std::cout << "c_device: ", e_m_n_device_result.mData, ",")
|
| 334 |
+
<< std::endl;
|
| 335 |
+
}
|
| 336 |
+
}
|
| 337 |
+
|
| 338 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 339 |
+
|
| 340 |
+
float ave_time = invoker_ptr->Run(
|
| 341 |
+
argument_ptr.get(),
|
| 342 |
+
StreamConfig{
|
| 343 |
+
nullptr, time_kernel, 0, n_warmup, n_iter, rotating_count > 1, rotating_count});
|
| 344 |
+
|
| 345 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 346 |
+
|
| 347 |
+
std::size_t num_btype =
|
| 348 |
+
sizeof(A0DataType) * M * K + sizeof(B0DataType) * K * N + sizeof(EDataType) * M * N;
|
| 349 |
+
|
| 350 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 351 |
+
|
| 352 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 353 |
+
|
| 354 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 355 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 356 |
+
|
| 357 |
+
if(tflops > best_tflops)
|
| 358 |
+
{
|
| 359 |
+
best_op_name = op_name;
|
| 360 |
+
best_tflops = tflops;
|
| 361 |
+
best_ave_time = ave_time;
|
| 362 |
+
best_gb_per_sec = gb_per_sec;
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
else
|
| 366 |
+
{
|
| 367 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 368 |
+
}
|
| 369 |
+
}
|
| 370 |
+
|
| 371 |
+
if constexpr(is_same<EDataType, float>::value)
|
| 372 |
+
{
|
| 373 |
+
std::cout << "Best Perf for datatype = f32";
|
| 374 |
+
}
|
| 375 |
+
else if constexpr(is_same<EDataType, half_t>::value)
|
| 376 |
+
{
|
| 377 |
+
std::cout << "Best Perf for datatype = f16";
|
| 378 |
+
}
|
| 379 |
+
else if constexpr(is_same<EDataType, bhalf_t>::value)
|
| 380 |
+
{
|
| 381 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 382 |
+
}
|
| 383 |
+
else if constexpr(is_same<EDataType, int8_t>::value)
|
| 384 |
+
{
|
| 385 |
+
std::cout << "Best Perf for datatype = int8";
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 389 |
+
{
|
| 390 |
+
std::cout << " ALayout = RowMajor";
|
| 391 |
+
}
|
| 392 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 393 |
+
{
|
| 394 |
+
std::cout << " ALayout = ColumnMajor";
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 398 |
+
{
|
| 399 |
+
std::cout << " BLayout = RowMajor";
|
| 400 |
+
}
|
| 401 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 402 |
+
{
|
| 403 |
+
std::cout << " BLayout = ColumnMajor";
|
| 404 |
+
}
|
| 405 |
+
|
| 406 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 407 |
+
<< " StrideB = " << StrideB << " StrideE = " << StrideE << " : " << best_ave_time
|
| 408 |
+
<< " ms, " << best_tflops << " TFlops, " << best_gb_per_sec << " GB/s, "
|
| 409 |
+
<< best_op_name << std::endl;
|
| 410 |
+
|
| 411 |
+
return pass;
|
| 412 |
+
}
|
| 413 |
+
|
| 414 |
+
} // namespace profiler
|
| 415 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_multiply_add_impl.hpp
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_multiple_d.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 12 |
+
|
| 13 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_multiply_add.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/utility/check_err.hpp"
|
| 16 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 18 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 19 |
+
#include "ck/library/utility/literals.hpp"
|
| 20 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 21 |
+
|
| 22 |
+
namespace ck {
|
| 23 |
+
namespace profiler {
|
| 24 |
+
|
| 25 |
+
template <typename ADataType,
|
| 26 |
+
typename BDataType,
|
| 27 |
+
typename AccDataType,
|
| 28 |
+
typename D0DataType,
|
| 29 |
+
typename D1DataType,
|
| 30 |
+
typename EDataType,
|
| 31 |
+
typename ALayout,
|
| 32 |
+
typename BLayout,
|
| 33 |
+
typename D0Layout,
|
| 34 |
+
typename D1Layout,
|
| 35 |
+
typename ELayout>
|
| 36 |
+
bool profile_gemm_multiply_add_impl(int do_verification,
|
| 37 |
+
int init_method,
|
| 38 |
+
bool /*do_log*/,
|
| 39 |
+
bool time_kernel,
|
| 40 |
+
int M,
|
| 41 |
+
int N,
|
| 42 |
+
int K,
|
| 43 |
+
int StrideA,
|
| 44 |
+
int StrideB,
|
| 45 |
+
int StrideD0,
|
| 46 |
+
int StrideD1,
|
| 47 |
+
int StrideE)
|
| 48 |
+
{
|
| 49 |
+
auto f_host_tensor_descriptor =
|
| 50 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 51 |
+
using namespace ck::literals;
|
| 52 |
+
|
| 53 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 54 |
+
{
|
| 55 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 56 |
+
}
|
| 57 |
+
else
|
| 58 |
+
{
|
| 59 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 60 |
+
}
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 64 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 65 |
+
Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor(M, N, StrideD0, D0Layout{}));
|
| 66 |
+
Tensor<D1DataType> d1_m_n(f_host_tensor_descriptor(M, N, StrideD1, D1Layout{}));
|
| 67 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 68 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 69 |
+
|
| 70 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 71 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 72 |
+
std::cout << "d0_m_n: " << d0_m_n.mDesc << std::endl;
|
| 73 |
+
std::cout << "d1_m_n: " << d1_m_n.mDesc << std::endl;
|
| 74 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 75 |
+
|
| 76 |
+
switch(init_method)
|
| 77 |
+
{
|
| 78 |
+
case 0: break;
|
| 79 |
+
case 1:
|
| 80 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 81 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
|
| 82 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
|
| 83 |
+
d1_m_n.GenerateTensorValue(GeneratorTensor_2<D1DataType>{-1, 1});
|
| 84 |
+
break;
|
| 85 |
+
default:
|
| 86 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 0.2});
|
| 87 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.1, 0.1});
|
| 88 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
|
| 89 |
+
d1_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{0.0, 1.0});
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 93 |
+
using MultiplyAdd = ck::tensor_operation::element_wise::MultiplyAdd;
|
| 94 |
+
|
| 95 |
+
using AElementOp = PassThrough;
|
| 96 |
+
using BElementOp = PassThrough;
|
| 97 |
+
using CDEElementOp = MultiplyAdd;
|
| 98 |
+
|
| 99 |
+
const auto a_element_op = AElementOp{};
|
| 100 |
+
const auto b_element_op = BElementOp{};
|
| 101 |
+
const auto cde_element_op = CDEElementOp{};
|
| 102 |
+
|
| 103 |
+
using DeviceOp =
|
| 104 |
+
ck::tensor_operation::device::DeviceGemmMultipleD<ALayout,
|
| 105 |
+
BLayout,
|
| 106 |
+
ck::Tuple<D0Layout, D1Layout>,
|
| 107 |
+
ELayout,
|
| 108 |
+
ADataType,
|
| 109 |
+
BDataType,
|
| 110 |
+
ck::Tuple<D0DataType, D1DataType>,
|
| 111 |
+
EDataType,
|
| 112 |
+
PassThrough,
|
| 113 |
+
PassThrough,
|
| 114 |
+
CDEElementOp>;
|
| 115 |
+
|
| 116 |
+
// get device op instances
|
| 117 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 118 |
+
DeviceOp>::GetInstances();
|
| 119 |
+
|
| 120 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 121 |
+
|
| 122 |
+
// run reference
|
| 123 |
+
if(do_verification)
|
| 124 |
+
{
|
| 125 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 126 |
+
|
| 127 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 128 |
+
BDataType,
|
| 129 |
+
AccDataType,
|
| 130 |
+
AccDataType,
|
| 131 |
+
AElementOp,
|
| 132 |
+
BElementOp,
|
| 133 |
+
PassThrough>;
|
| 134 |
+
|
| 135 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 136 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 137 |
+
|
| 138 |
+
auto ref_argument =
|
| 139 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, a_element_op, b_element_op, PassThrough{});
|
| 140 |
+
|
| 141 |
+
ref_invoker.Run(ref_argument);
|
| 142 |
+
|
| 143 |
+
for(int m = 0; m < M; ++m)
|
| 144 |
+
{
|
| 145 |
+
for(int n = 0; n < N; ++n)
|
| 146 |
+
{
|
| 147 |
+
cde_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d0_m_n(m, n), d1_m_n(m, n));
|
| 148 |
+
}
|
| 149 |
+
}
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 153 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 154 |
+
DeviceMem d0_m_n_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 155 |
+
DeviceMem d1_m_n_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpaceSize());
|
| 156 |
+
DeviceMem e_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 157 |
+
|
| 158 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 159 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 160 |
+
d0_m_n_device_buf.ToDevice(d0_m_n.mData.data());
|
| 161 |
+
d1_m_n_device_buf.ToDevice(d1_m_n.mData.data());
|
| 162 |
+
|
| 163 |
+
std::string best_op_name;
|
| 164 |
+
float best_ave_time = 0;
|
| 165 |
+
float best_tflops = 0;
|
| 166 |
+
float best_gb_per_sec = 0;
|
| 167 |
+
|
| 168 |
+
bool pass = true;
|
| 169 |
+
|
| 170 |
+
// profile device operation instances
|
| 171 |
+
for(auto& op_ptr : op_ptrs)
|
| 172 |
+
{
|
| 173 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 174 |
+
a_device_buf.GetDeviceBuffer(),
|
| 175 |
+
b_device_buf.GetDeviceBuffer(),
|
| 176 |
+
std::array<const void*, 2>{d0_m_n_device_buf.GetDeviceBuffer(),
|
| 177 |
+
d1_m_n_device_buf.GetDeviceBuffer()},
|
| 178 |
+
e_device_buf.GetDeviceBuffer(),
|
| 179 |
+
M,
|
| 180 |
+
N,
|
| 181 |
+
K,
|
| 182 |
+
StrideA,
|
| 183 |
+
StrideB,
|
| 184 |
+
std::array<ck::index_t, 2>{StrideD0, StrideD1},
|
| 185 |
+
StrideE,
|
| 186 |
+
a_element_op,
|
| 187 |
+
b_element_op,
|
| 188 |
+
cde_element_op);
|
| 189 |
+
|
| 190 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 191 |
+
|
| 192 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 193 |
+
|
| 194 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 195 |
+
{
|
| 196 |
+
// re-init E to zero before profiling a kernel
|
| 197 |
+
e_device_buf.SetZero();
|
| 198 |
+
|
| 199 |
+
float ave_time =
|
| 200 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 201 |
+
|
| 202 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 203 |
+
|
| 204 |
+
std::size_t num_btype =
|
| 205 |
+
sizeof(ADataType) * M * K + sizeof(BDataType) * K * N + sizeof(EDataType) * M * N;
|
| 206 |
+
|
| 207 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 208 |
+
|
| 209 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 210 |
+
|
| 211 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 212 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 213 |
+
|
| 214 |
+
if(tflops > best_tflops)
|
| 215 |
+
{
|
| 216 |
+
best_op_name = op_name;
|
| 217 |
+
best_tflops = tflops;
|
| 218 |
+
best_ave_time = ave_time;
|
| 219 |
+
best_gb_per_sec = gb_per_sec;
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
if(do_verification)
|
| 223 |
+
{
|
| 224 |
+
e_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 225 |
+
|
| 226 |
+
pass = pass && ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 227 |
+
}
|
| 228 |
+
}
|
| 229 |
+
else
|
| 230 |
+
{
|
| 231 |
+
std::cout << op_name << " does not support this problem" << std::endl;
|
| 232 |
+
}
|
| 233 |
+
}
|
| 234 |
+
|
| 235 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 236 |
+
<< best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 237 |
+
|
| 238 |
+
return pass;
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
} // namespace profiler
|
| 242 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_multiply_multiply_impl.hpp
ADDED
|
@@ -0,0 +1,359 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_xdl_cshuffle_v3.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_multiply_multiply.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename ADataType,
|
| 28 |
+
typename BDataType,
|
| 29 |
+
typename ComputeDataType,
|
| 30 |
+
typename AccDataType,
|
| 31 |
+
typename D0DataType,
|
| 32 |
+
typename D1DataType,
|
| 33 |
+
typename EDataType,
|
| 34 |
+
typename ALayout,
|
| 35 |
+
typename BLayout,
|
| 36 |
+
typename D0Layout,
|
| 37 |
+
typename D1Layout,
|
| 38 |
+
typename ELayout>
|
| 39 |
+
bool profile_gemm_multiply_multiply_impl(int do_verification,
|
| 40 |
+
int init_method,
|
| 41 |
+
bool do_log,
|
| 42 |
+
bool time_kernel,
|
| 43 |
+
int M,
|
| 44 |
+
int N,
|
| 45 |
+
int K,
|
| 46 |
+
int StrideA,
|
| 47 |
+
int StrideB,
|
| 48 |
+
int StrideD0,
|
| 49 |
+
int StrideD1,
|
| 50 |
+
int StrideE,
|
| 51 |
+
int KBatch,
|
| 52 |
+
int n_warmup,
|
| 53 |
+
int n_iter,
|
| 54 |
+
uint64_t rotating = 0)
|
| 55 |
+
{
|
| 56 |
+
bool pass = true;
|
| 57 |
+
|
| 58 |
+
auto f_host_tensor_descriptor =
|
| 59 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 60 |
+
using namespace ck::literals;
|
| 61 |
+
|
| 62 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 63 |
+
{
|
| 64 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 65 |
+
}
|
| 66 |
+
else
|
| 67 |
+
{
|
| 68 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 69 |
+
}
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 73 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 74 |
+
Tensor<D0DataType> d0_m_n(f_host_tensor_descriptor(M, N, StrideD0, D0Layout{}));
|
| 75 |
+
Tensor<D1DataType> d1_m_n(f_host_tensor_descriptor(M, N, StrideD1, D1Layout{}));
|
| 76 |
+
Tensor<EDataType> e_m_n_host_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 77 |
+
Tensor<EDataType> e_m_n_device_result(f_host_tensor_descriptor(M, N, StrideE, ELayout{}));
|
| 78 |
+
|
| 79 |
+
int total_gemm_needed =
|
| 80 |
+
a_m_k.GetElementSpaceSizeInBytes() + b_k_n.GetElementSpaceSizeInBytes() +
|
| 81 |
+
d0_m_n.GetElementSpaceSizeInBytes() + d1_m_n.GetElementSpaceSizeInBytes();
|
| 82 |
+
int rotating_count = std::max(
|
| 83 |
+
1,
|
| 84 |
+
std::min(n_iter,
|
| 85 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 86 |
+
|
| 87 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 88 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 89 |
+
std::cout << "d0_m_n: " << d0_m_n.mDesc << std::endl;
|
| 90 |
+
std::cout << "d1_m_n: " << d1_m_n.mDesc << std::endl;
|
| 91 |
+
std::cout << "e_m_n: " << e_m_n_device_result.mDesc << std::endl;
|
| 92 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 93 |
+
|
| 94 |
+
switch(init_method)
|
| 95 |
+
{
|
| 96 |
+
case 0: break;
|
| 97 |
+
case 1:
|
| 98 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-1, 2});
|
| 99 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-1, 2});
|
| 100 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_2<D0DataType>{-5, 5});
|
| 101 |
+
d1_m_n.GenerateTensorValue(GeneratorTensor_2<D1DataType>{-1, 1});
|
| 102 |
+
break;
|
| 103 |
+
default:
|
| 104 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 105 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 106 |
+
d0_m_n.GenerateTensorValue(GeneratorTensor_3<D0DataType>{0.0, 1.0});
|
| 107 |
+
d1_m_n.GenerateTensorValue(GeneratorTensor_3<D1DataType>{0.0, 1.0});
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 111 |
+
using MultiplyMultiply = ck::tensor_operation::element_wise::MultiplyMultiply;
|
| 112 |
+
|
| 113 |
+
using AElementOp = PassThrough;
|
| 114 |
+
using BElementOp = PassThrough;
|
| 115 |
+
using CElementOp = MultiplyMultiply;
|
| 116 |
+
|
| 117 |
+
const auto a_element_op = AElementOp{};
|
| 118 |
+
const auto b_element_op = BElementOp{};
|
| 119 |
+
const auto c_element_op = CElementOp{};
|
| 120 |
+
|
| 121 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 122 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 123 |
+
DeviceMem d0_device_buf(sizeof(D0DataType) * d0_m_n.mDesc.GetElementSpaceSize());
|
| 124 |
+
DeviceMem d1_device_buf(sizeof(D1DataType) * d1_m_n.mDesc.GetElementSpaceSize());
|
| 125 |
+
DeviceMem c_device_buf(sizeof(EDataType) * e_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 126 |
+
|
| 127 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 128 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 129 |
+
d0_device_buf.ToDevice(d0_m_n.mData.data());
|
| 130 |
+
d1_device_buf.ToDevice(d1_m_n.mData.data());
|
| 131 |
+
|
| 132 |
+
using DeviceOp =
|
| 133 |
+
ck::tensor_operation::device::DeviceGemmMultipleDSplitK<ALayout,
|
| 134 |
+
BLayout,
|
| 135 |
+
ck::Tuple<D0Layout, D1Layout>,
|
| 136 |
+
ELayout,
|
| 137 |
+
ADataType,
|
| 138 |
+
BDataType,
|
| 139 |
+
ck::Tuple<D0DataType, D1DataType>,
|
| 140 |
+
EDataType,
|
| 141 |
+
AElementOp,
|
| 142 |
+
BElementOp,
|
| 143 |
+
CElementOp>;
|
| 144 |
+
|
| 145 |
+
// get device op instances
|
| 146 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 147 |
+
DeviceOp>::GetInstances();
|
| 148 |
+
|
| 149 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 150 |
+
|
| 151 |
+
// Run reference GEMM
|
| 152 |
+
if(do_verification)
|
| 153 |
+
{
|
| 154 |
+
Tensor<AccDataType> c_m_n({M, N});
|
| 155 |
+
|
| 156 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 157 |
+
BDataType,
|
| 158 |
+
AccDataType,
|
| 159 |
+
AccDataType,
|
| 160 |
+
AElementOp,
|
| 161 |
+
BElementOp,
|
| 162 |
+
PassThrough,
|
| 163 |
+
ComputeDataType>;
|
| 164 |
+
|
| 165 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 166 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 167 |
+
|
| 168 |
+
auto ref_argument =
|
| 169 |
+
ref_gemm.MakeArgument(a_m_k, b_k_n, c_m_n, PassThrough{}, PassThrough{}, PassThrough{});
|
| 170 |
+
|
| 171 |
+
ref_invoker.Run(ref_argument);
|
| 172 |
+
|
| 173 |
+
for(int m = 0; m < M; ++m)
|
| 174 |
+
{
|
| 175 |
+
for(int n = 0; n < N; ++n)
|
| 176 |
+
{
|
| 177 |
+
c_element_op(e_m_n_host_result(m, n), c_m_n(m, n), d0_m_n(m, n), d1_m_n(m, n));
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
std::string best_op_name;
|
| 183 |
+
float best_ave_time = 0;
|
| 184 |
+
float best_tflops = 0;
|
| 185 |
+
float best_gb_per_sec = 0;
|
| 186 |
+
float best_kbatch = 0;
|
| 187 |
+
|
| 188 |
+
// profile device GEMM instances
|
| 189 |
+
for(auto& op_ptr : op_ptrs)
|
| 190 |
+
{
|
| 191 |
+
// Seems like when performance measurement has bug when spiltK is large
|
| 192 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 16};
|
| 193 |
+
|
| 194 |
+
if(KBatch > 0)
|
| 195 |
+
{
|
| 196 |
+
kbatch_list = {KBatch};
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 200 |
+
{
|
| 201 |
+
auto kbatch_curr = kbatch_list[i];
|
| 202 |
+
|
| 203 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 204 |
+
static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 205 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 206 |
+
std::array<const void*, 2>{d0_device_buf.GetDeviceBuffer(),
|
| 207 |
+
d1_device_buf.GetDeviceBuffer()},
|
| 208 |
+
static_cast<EDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 209 |
+
M,
|
| 210 |
+
N,
|
| 211 |
+
K,
|
| 212 |
+
StrideA,
|
| 213 |
+
StrideB,
|
| 214 |
+
std::array<ck::index_t, 2>{StrideD0, StrideD1},
|
| 215 |
+
StrideE,
|
| 216 |
+
kbatch_curr,
|
| 217 |
+
a_element_op,
|
| 218 |
+
b_element_op,
|
| 219 |
+
c_element_op);
|
| 220 |
+
|
| 221 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 222 |
+
|
| 223 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 224 |
+
{
|
| 225 |
+
|
| 226 |
+
// re-init C to zero before profiling next kernel
|
| 227 |
+
c_device_buf.SetZero();
|
| 228 |
+
|
| 229 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 230 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 231 |
+
|
| 232 |
+
if(do_verification)
|
| 233 |
+
{
|
| 234 |
+
c_device_buf.FromDevice(e_m_n_device_result.mData.data());
|
| 235 |
+
|
| 236 |
+
#if defined CK_ENABLE_FP8 || defined CK_ENABLE_INT8
|
| 237 |
+
// set softer tolerances for fp8
|
| 238 |
+
if constexpr((is_same_v<ADataType, f8_t> || is_same_v<BDataType, f8_t> ||
|
| 239 |
+
is_same_v<EDataType, f8_t>) ||
|
| 240 |
+
(is_same_v<ADataType, int8_t> || is_same_v<BDataType, int8_t> ||
|
| 241 |
+
is_same_v<EDataType, int8_t>))
|
| 242 |
+
{
|
| 243 |
+
std::string msg = "Error: Incorrect results!";
|
| 244 |
+
double rtol = 1e-1;
|
| 245 |
+
double atol = 1e-1;
|
| 246 |
+
pass = pass & ck::utils::check_err(
|
| 247 |
+
e_m_n_device_result, e_m_n_host_result, msg, rtol, atol);
|
| 248 |
+
}
|
| 249 |
+
else
|
| 250 |
+
{
|
| 251 |
+
#endif
|
| 252 |
+
pass = pass & ck::utils::check_err(e_m_n_device_result, e_m_n_host_result);
|
| 253 |
+
#if defined CK_ENABLE_FP8 || defined CK_ENABLE_INT8
|
| 254 |
+
}
|
| 255 |
+
#endif
|
| 256 |
+
|
| 257 |
+
if(do_log)
|
| 258 |
+
{
|
| 259 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",") << std::endl;
|
| 260 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
|
| 261 |
+
LogRangeAsType<float>(
|
| 262 |
+
std::cout << "c_host : ", e_m_n_host_result.mData, ",")
|
| 263 |
+
<< std::endl;
|
| 264 |
+
LogRangeAsType<float>(
|
| 265 |
+
std::cout << "c_device: ", e_m_n_device_result.mData, ",")
|
| 266 |
+
<< std::endl;
|
| 267 |
+
}
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 271 |
+
|
| 272 |
+
// timer of develop branch should only apply to empty hipstream
|
| 273 |
+
// hipStream_t stream;
|
| 274 |
+
// hip_check_error(hipStreamCreate(&stream));
|
| 275 |
+
|
| 276 |
+
float ave_time = invoker_ptr->Run(argument_ptr.get(),
|
| 277 |
+
StreamConfig{nullptr,
|
| 278 |
+
time_kernel,
|
| 279 |
+
0,
|
| 280 |
+
n_warmup,
|
| 281 |
+
n_iter,
|
| 282 |
+
rotating_count > 1,
|
| 283 |
+
rotating_count});
|
| 284 |
+
|
| 285 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 286 |
+
|
| 287 |
+
std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 288 |
+
sizeof(EDataType) * M * N;
|
| 289 |
+
|
| 290 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 291 |
+
|
| 292 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 293 |
+
|
| 294 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 295 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", KBatch "
|
| 296 |
+
<< kbatch_curr << std::endl;
|
| 297 |
+
|
| 298 |
+
if(tflops > best_tflops && ave_time > 1e-10)
|
| 299 |
+
{
|
| 300 |
+
best_op_name = op_name;
|
| 301 |
+
best_tflops = tflops;
|
| 302 |
+
best_ave_time = ave_time;
|
| 303 |
+
best_gb_per_sec = gb_per_sec;
|
| 304 |
+
best_kbatch = kbatch_curr;
|
| 305 |
+
}
|
| 306 |
+
}
|
| 307 |
+
else
|
| 308 |
+
{
|
| 309 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 310 |
+
<< std::endl;
|
| 311 |
+
}
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
if constexpr(is_same<EDataType, float>::value)
|
| 316 |
+
{
|
| 317 |
+
std::cout << "Best Perf for datatype = f32";
|
| 318 |
+
}
|
| 319 |
+
else if constexpr(is_same<EDataType, half_t>::value)
|
| 320 |
+
{
|
| 321 |
+
std::cout << "Best Perf for datatype = f16";
|
| 322 |
+
}
|
| 323 |
+
else if constexpr(is_same<EDataType, bhalf_t>::value)
|
| 324 |
+
{
|
| 325 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 326 |
+
}
|
| 327 |
+
else if constexpr(is_same<EDataType, int8_t>::value)
|
| 328 |
+
{
|
| 329 |
+
std::cout << "Best Perf for datatype = int8";
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 333 |
+
{
|
| 334 |
+
std::cout << " ALayout = RowMajor";
|
| 335 |
+
}
|
| 336 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 337 |
+
{
|
| 338 |
+
std::cout << " ALayout = ColumnMajor";
|
| 339 |
+
}
|
| 340 |
+
|
| 341 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 342 |
+
{
|
| 343 |
+
std::cout << " BLayout = RowMajor";
|
| 344 |
+
}
|
| 345 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 346 |
+
{
|
| 347 |
+
std::cout << " BLayout = ColumnMajor";
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 351 |
+
<< " StrideB = " << StrideB << " StrideE = " << StrideE << " KBatch = " << best_kbatch
|
| 352 |
+
<< " : " << best_ave_time << " ms, " << best_tflops << " TFlops, " << best_gb_per_sec
|
| 353 |
+
<< " GB/s, " << best_op_name << std::endl;
|
| 354 |
+
|
| 355 |
+
return pass;
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
} // namespace profiler
|
| 359 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_mx_impl.hpp
ADDED
|
@@ -0,0 +1,534 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_mx_gemm.hpp"
|
| 12 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_mx.hpp"
|
| 13 |
+
#include "ck/library/utility/check_err.hpp"
|
| 14 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 15 |
+
#include "ck/library/utility/fill.hpp"
|
| 16 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 17 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 18 |
+
#include "ck/library/utility/literals.hpp"
|
| 19 |
+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3_mx.hpp"
|
| 20 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 21 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 22 |
+
#include "ck/utility/data_type.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
#if 1
|
| 28 |
+
template <bool KLast>
|
| 29 |
+
void preShuffleScaleBuffer(ck::e8m0_bexp_t* src, ck::e8m0_bexp_t* dst, int MN, int K)
|
| 30 |
+
{
|
| 31 |
+
int MNXdlPack = 2;
|
| 32 |
+
int KXdlPack = 2;
|
| 33 |
+
|
| 34 |
+
int XdlMNThread = 16;
|
| 35 |
+
int XdlKThread = 64 / XdlMNThread;
|
| 36 |
+
|
| 37 |
+
int K0 = K / KXdlPack / XdlKThread; // KRepeat
|
| 38 |
+
|
| 39 |
+
// The 4 16x128 building blocks will be packed into 1 32x256 for F4
|
| 40 |
+
// The 8 16x16x128 mfma will be packed into 1 32x32x256 for F4
|
| 41 |
+
|
| 42 |
+
// unfold the MN32xK(256/32) scale buffer
|
| 43 |
+
// 4 16 2 2
|
| 44 |
+
// To XdlKThread-> XdlMNThread -> KXdlPack -> MNXdlPack
|
| 45 |
+
// Then, MNRepeat->KRepeat
|
| 46 |
+
|
| 47 |
+
for(int n = 0; n < MN; ++n)
|
| 48 |
+
{
|
| 49 |
+
for(int k = 0; k < K; ++k)
|
| 50 |
+
{
|
| 51 |
+
int n0 = n / (XdlMNThread * MNXdlPack); // i MNRepeat
|
| 52 |
+
int tempn = n % (XdlMNThread * MNXdlPack);
|
| 53 |
+
int n1 = tempn % XdlMNThread; // i XdlMNThread
|
| 54 |
+
int n2 = tempn / XdlMNThread; // i MNXdlPack
|
| 55 |
+
|
| 56 |
+
int k0 = k / (XdlKThread * KXdlPack); // i KRepeat
|
| 57 |
+
int tempk = k % (XdlKThread * KXdlPack);
|
| 58 |
+
int k1 = tempk % XdlKThread; // i XdlKThread
|
| 59 |
+
int k2 = tempk / XdlKThread; // i KXdlPack
|
| 60 |
+
|
| 61 |
+
int outputIndex = n0 * MNXdlPack * KXdlPack * XdlMNThread * XdlKThread * K0 +
|
| 62 |
+
k0 * MNXdlPack * KXdlPack * XdlMNThread * XdlKThread +
|
| 63 |
+
k1 * MNXdlPack * KXdlPack * XdlMNThread + n1 * MNXdlPack * KXdlPack +
|
| 64 |
+
k2 * MNXdlPack + n2;
|
| 65 |
+
// src[n * K + k] = ck::type_convert<ck::e8m0_bexp_t>(static_cast<float>(powf(2.0f, n2 +
|
| 66 |
+
// k2 * MNXdlPack)));
|
| 67 |
+
if constexpr(KLast)
|
| 68 |
+
dst[outputIndex] = src[n * K + k];
|
| 69 |
+
else
|
| 70 |
+
dst[outputIndex] = src[k * MN + n];
|
| 71 |
+
}
|
| 72 |
+
}
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
void preShuffleBuffer(const ck::f4x2_pk_t* src, ck::f4x2_pk_t* dst, int N, int K, int NXdl)
|
| 76 |
+
{
|
| 77 |
+
int KPack = 16;
|
| 78 |
+
int NLane = NXdl;
|
| 79 |
+
int KLane = 64 / NLane;
|
| 80 |
+
int K_pk = K / 2;
|
| 81 |
+
int K0 = K_pk / (KLane * KPack);
|
| 82 |
+
// K -> K0 KLane KPack
|
| 83 |
+
// N -> N0 NLane
|
| 84 |
+
// N, K -> N0 K0 KLane NLane KPack
|
| 85 |
+
int tempk;
|
| 86 |
+
for(int n = 0; n < N; ++n)
|
| 87 |
+
{
|
| 88 |
+
for(int k = 0; k < K_pk; ++k)
|
| 89 |
+
{
|
| 90 |
+
int n0 = n / NLane;
|
| 91 |
+
int n1 = n % NLane;
|
| 92 |
+
|
| 93 |
+
int k0 = k / (KLane * KPack);
|
| 94 |
+
tempk = k % (KLane * KPack);
|
| 95 |
+
int k1 = tempk / KPack;
|
| 96 |
+
int k2 = tempk % KPack;
|
| 97 |
+
|
| 98 |
+
int outputIndex = n0 * KPack * NLane * KLane * K0 + k0 * KPack * NLane * KLane +
|
| 99 |
+
k1 * KPack * NLane + n1 * KPack + k2;
|
| 100 |
+
|
| 101 |
+
dst[outputIndex] = src[n * K_pk + k];
|
| 102 |
+
}
|
| 103 |
+
}
|
| 104 |
+
}
|
| 105 |
+
#endif
|
| 106 |
+
|
| 107 |
+
template <typename ADataType,
|
| 108 |
+
typename BDataType,
|
| 109 |
+
typename CDataType,
|
| 110 |
+
typename ALayout,
|
| 111 |
+
typename BLayout,
|
| 112 |
+
typename CLayout,
|
| 113 |
+
int ScaleBlockSize>
|
| 114 |
+
bool profile_gemm_mx_impl(int do_verification,
|
| 115 |
+
int init_method,
|
| 116 |
+
bool do_log,
|
| 117 |
+
bool time_kernel,
|
| 118 |
+
int M,
|
| 119 |
+
int N,
|
| 120 |
+
int K,
|
| 121 |
+
int StrideA,
|
| 122 |
+
int StrideB,
|
| 123 |
+
int StrideC,
|
| 124 |
+
int KBatch,
|
| 125 |
+
int n_warmup,
|
| 126 |
+
int n_iter,
|
| 127 |
+
uint64_t rotating = 0)
|
| 128 |
+
{
|
| 129 |
+
using tensor_operation::device::instance::Col;
|
| 130 |
+
using tensor_operation::device::instance::E8M0;
|
| 131 |
+
using tensor_operation::device::instance::E8M0PK;
|
| 132 |
+
using tensor_operation::device::instance::MFMA;
|
| 133 |
+
using tensor_operation::device::instance::Row;
|
| 134 |
+
|
| 135 |
+
constexpr bool BPreShuffle = is_same_v<BLayout, MFMA>;
|
| 136 |
+
using BRefLayout = conditional_t<BPreShuffle, Col, BLayout>;
|
| 137 |
+
|
| 138 |
+
if(K % ScaleBlockSize != 0)
|
| 139 |
+
{
|
| 140 |
+
throw std::runtime_error("wrong! K must be multiple of ScaleBlockSize.");
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
using XDataType = E8M0;
|
| 144 |
+
using XPackedDataType = E8M0PK;
|
| 145 |
+
using AScaleLayout = Row;
|
| 146 |
+
using BScaleLayout = Col;
|
| 147 |
+
|
| 148 |
+
auto f_host_tensor_descriptor =
|
| 149 |
+
[](ck::index_t row, ck::index_t col, ck::index_t stride, auto layout) {
|
| 150 |
+
using namespace ck::literals;
|
| 151 |
+
|
| 152 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 153 |
+
return HostTensorDescriptor({row, col}, {stride, 1});
|
| 154 |
+
else
|
| 155 |
+
return HostTensorDescriptor({row, col}, {1, stride});
|
| 156 |
+
};
|
| 157 |
+
auto f_get_default_stride =
|
| 158 |
+
[](ck::index_t row, ck::index_t col, ck::index_t stride, auto layout) {
|
| 159 |
+
if(stride == -1)
|
| 160 |
+
{
|
| 161 |
+
// give a chance if stride is -1, return a default packed stride
|
| 162 |
+
if constexpr(std::is_same_v<decltype(layout), ck::tensor_layout::gemm::RowMajor>)
|
| 163 |
+
return static_cast<ck::index_t>(col);
|
| 164 |
+
else
|
| 165 |
+
return static_cast<ck::index_t>(row);
|
| 166 |
+
}
|
| 167 |
+
else
|
| 168 |
+
return static_cast<ck::index_t>(stride);
|
| 169 |
+
};
|
| 170 |
+
|
| 171 |
+
auto Scale_Padded_M = (M + 32 - 1) / 32 * 32;
|
| 172 |
+
auto Scale_Stride_AM =
|
| 173 |
+
f_get_default_stride(Scale_Padded_M, K / ScaleBlockSize, -1, AScaleLayout{});
|
| 174 |
+
auto Scale_Stride_BN = f_get_default_stride(K / ScaleBlockSize, N, -1, BScaleLayout{});
|
| 175 |
+
|
| 176 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 177 |
+
auto b_k_n =
|
| 178 |
+
std::make_shared<Tensor<BDataType>>(f_host_tensor_descriptor(K, N, StrideB, BRefLayout{}));
|
| 179 |
+
auto b_input = b_k_n;
|
| 180 |
+
if constexpr(BPreShuffle)
|
| 181 |
+
b_input = std::make_shared<Tensor<BDataType>>(
|
| 182 |
+
f_host_tensor_descriptor(K, N, StrideB, BRefLayout{})); // use layout only for size
|
| 183 |
+
|
| 184 |
+
// scales for A and B
|
| 185 |
+
Tensor<XDataType> a_m_k_scale(f_host_tensor_descriptor(
|
| 186 |
+
Scale_Padded_M, K / ScaleBlockSize, Scale_Stride_AM, AScaleLayout{}));
|
| 187 |
+
Tensor<XDataType> b_k_n_scale(
|
| 188 |
+
f_host_tensor_descriptor(K / ScaleBlockSize, N, Scale_Stride_BN, BScaleLayout{}));
|
| 189 |
+
|
| 190 |
+
// shuffled scales for A and B
|
| 191 |
+
Tensor<XDataType> a_shuffled_scale(f_host_tensor_descriptor(
|
| 192 |
+
Scale_Padded_M, K / ScaleBlockSize, Scale_Stride_AM, AScaleLayout{}));
|
| 193 |
+
Tensor<XDataType> b_shuffled_scale(
|
| 194 |
+
f_host_tensor_descriptor(K / ScaleBlockSize, N, Scale_Stride_BN, BScaleLayout{}));
|
| 195 |
+
|
| 196 |
+
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 197 |
+
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 198 |
+
|
| 199 |
+
std::size_t total_gemm_needed =
|
| 200 |
+
a_m_k.GetElementSpaceSizeInBytes() + b_k_n->GetElementSpaceSizeInBytes() +
|
| 201 |
+
a_m_k_scale.GetElementSpaceSizeInBytes() + b_k_n_scale.GetElementSpaceSizeInBytes() +
|
| 202 |
+
a_shuffled_scale.GetElementSpaceSizeInBytes() +
|
| 203 |
+
b_shuffled_scale.GetElementSpaceSizeInBytes();
|
| 204 |
+
int rotating_count = std::max(
|
| 205 |
+
1,
|
| 206 |
+
std::min(n_iter,
|
| 207 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 208 |
+
|
| 209 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 210 |
+
std::cout << "a_m_k_scale: " << a_m_k_scale.mDesc << std::endl;
|
| 211 |
+
std::cout << "b_k_n: " << b_k_n->mDesc << std::endl;
|
| 212 |
+
std::cout << "b_k_n_scale: " << b_k_n_scale.mDesc << std::endl;
|
| 213 |
+
std::cout << "c_m_n: " << c_m_n_device_result.mDesc << std::endl;
|
| 214 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 215 |
+
|
| 216 |
+
auto a_data_element = [](float x) {
|
| 217 |
+
if constexpr(ck::is_same_v<ADataType, ck::f4x2_pk_t>)
|
| 218 |
+
return ck::type_convert<ADataType>(ck::float2_t(x));
|
| 219 |
+
else
|
| 220 |
+
return ck::type_convert<ADataType>(x);
|
| 221 |
+
};
|
| 222 |
+
auto b_data_element = [](float x) {
|
| 223 |
+
if constexpr(ck::is_same_v<BDataType, ck::f4x2_pk_t>)
|
| 224 |
+
return ck::type_convert<BDataType>(ck::float2_t(x));
|
| 225 |
+
else
|
| 226 |
+
return ck::type_convert<BDataType>(x);
|
| 227 |
+
};
|
| 228 |
+
|
| 229 |
+
using int_distr = std::uniform_int_distribution<int>;
|
| 230 |
+
using float_distr = std::uniform_real_distribution<float>;
|
| 231 |
+
switch(init_method)
|
| 232 |
+
{
|
| 233 |
+
case 0: // Initializations for development and debugging
|
| 234 |
+
ck::utils::FillConstant<ADataType>{a_data_element(1.0f)}(a_m_k);
|
| 235 |
+
ck::utils::FillConstant<XDataType>{ck::type_convert<XDataType>(2.0f)}(a_m_k_scale);
|
| 236 |
+
ck::utils::FillConstant<BDataType>{b_data_element(0.5f)}(*b_k_n);
|
| 237 |
+
ck::utils::FillConstant<XDataType>{ck::type_convert<XDataType>(1.0f)}(b_k_n_scale);
|
| 238 |
+
if(do_log)
|
| 239 |
+
{
|
| 240 |
+
std::cout << "Init A = {1}" << std::endl;
|
| 241 |
+
std::cout << "Init A scale = {2.0}" << std::endl;
|
| 242 |
+
std::cout << "Init B = {0.5}" << std::endl;
|
| 243 |
+
std::cout << "Init B scale = {1.0}" << std::endl;
|
| 244 |
+
std::cout << "Expect C = {K}" << std::endl;
|
| 245 |
+
}
|
| 246 |
+
break;
|
| 247 |
+
|
| 248 |
+
case 1:
|
| 249 |
+
|
| 250 |
+
a_m_k.GenerateTensorDistr(int_distr{-4, 5}); // Z[-4,4]
|
| 251 |
+
b_k_n->GenerateTensorDistr(int_distr{-4, 5}); // Z[-4,4]
|
| 252 |
+
|
| 253 |
+
a_m_k_scale.GenerateTensorDistr(int_distr{125, 129}); // scales: {0.25, 0.5, 1, 2}
|
| 254 |
+
b_k_n_scale.GenerateTensorDistr(int_distr{125, 129}); // scales: {0.25, 0.5, 1, 2}
|
| 255 |
+
break;
|
| 256 |
+
|
| 257 |
+
default:
|
| 258 |
+
a_m_k.GenerateTensorDistr(float_distr{-2.0, 2.0});
|
| 259 |
+
a_m_k_scale.GenerateTensorDistr(float_distr{powf(2.0f, -125.0f), 1.0f});
|
| 260 |
+
|
| 261 |
+
b_k_n->GenerateTensorDistr(float_distr{-2.0, 2.0});
|
| 262 |
+
b_k_n_scale.GenerateTensorDistr(float_distr{powf(2.0f, -125.0f), 1.0f});
|
| 263 |
+
break;
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
#if 1
|
| 267 |
+
preShuffleScaleBuffer<ck::is_same_v<ALayout, Row>>(a_m_k_scale.mData.data(),
|
| 268 |
+
a_shuffled_scale.mData.data(),
|
| 269 |
+
Scale_Padded_M,
|
| 270 |
+
K / ScaleBlockSize);
|
| 271 |
+
preShuffleScaleBuffer<ck::is_same_v<BRefLayout, Col>>(
|
| 272 |
+
b_k_n_scale.mData.data(), b_shuffled_scale.mData.data(), N, K / ScaleBlockSize);
|
| 273 |
+
if constexpr(BPreShuffle)
|
| 274 |
+
{
|
| 275 |
+
int NPerXdl = 16; // Fixed 16
|
| 276 |
+
preShuffleBuffer(b_k_n->mData.data(), b_input->mData.data(), N, K, NPerXdl);
|
| 277 |
+
}
|
| 278 |
+
#endif
|
| 279 |
+
|
| 280 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 281 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 282 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 283 |
+
|
| 284 |
+
const auto a_element_op = AElementOp{};
|
| 285 |
+
const auto b_element_op = BElementOp{};
|
| 286 |
+
const auto c_element_op = CElementOp{};
|
| 287 |
+
|
| 288 |
+
if(do_log > 0)
|
| 289 |
+
std::cout << "Device memory allocation..." << std::endl;
|
| 290 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.GetElementSpaceSize());
|
| 291 |
+
DeviceMem a_scale_device_buf(sizeof(XDataType) * a_m_k_scale.GetElementSpaceSize());
|
| 292 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n->GetElementSpaceSize());
|
| 293 |
+
DeviceMem b_scale_device_buf(sizeof(XDataType) * b_k_n_scale.GetElementSpaceSize());
|
| 294 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.GetElementSpaceSize());
|
| 295 |
+
|
| 296 |
+
if(do_log > 0)
|
| 297 |
+
std::cout << "Upload data to device..." << std::endl;
|
| 298 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 299 |
+
a_scale_device_buf.ToDevice(a_shuffled_scale.mData.data());
|
| 300 |
+
b_device_buf.ToDevice(b_input->mData.data());
|
| 301 |
+
b_scale_device_buf.ToDevice(b_shuffled_scale.mData.data());
|
| 302 |
+
|
| 303 |
+
if(do_log > 0)
|
| 304 |
+
std::cout << "Done." << std::endl;
|
| 305 |
+
|
| 306 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmMX<ALayout,
|
| 307 |
+
BLayout,
|
| 308 |
+
CLayout,
|
| 309 |
+
ADataType,
|
| 310 |
+
XPackedDataType,
|
| 311 |
+
BDataType,
|
| 312 |
+
XPackedDataType,
|
| 313 |
+
CDataType,
|
| 314 |
+
ScaleBlockSize,
|
| 315 |
+
AElementOp,
|
| 316 |
+
BElementOp,
|
| 317 |
+
CElementOp>;
|
| 318 |
+
std::cout << "finding op instances..." << std::endl;
|
| 319 |
+
// get device op instances
|
| 320 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 321 |
+
DeviceOp>::GetInstances();
|
| 322 |
+
|
| 323 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 324 |
+
|
| 325 |
+
// Run reference GEMM
|
| 326 |
+
if(do_verification)
|
| 327 |
+
{
|
| 328 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceMXGemm< //
|
| 329 |
+
ADataType,
|
| 330 |
+
BDataType,
|
| 331 |
+
CDataType,
|
| 332 |
+
float, // AccDataType
|
| 333 |
+
XDataType,
|
| 334 |
+
AElementOp,
|
| 335 |
+
BElementOp,
|
| 336 |
+
CElementOp,
|
| 337 |
+
float, // ComputeTypeA
|
| 338 |
+
float // ComputeTypeB
|
| 339 |
+
>;
|
| 340 |
+
|
| 341 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 342 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 343 |
+
|
| 344 |
+
auto ref_argument = ref_gemm.MakeArgument(a_m_k,
|
| 345 |
+
a_m_k_scale,
|
| 346 |
+
*b_k_n,
|
| 347 |
+
b_k_n_scale,
|
| 348 |
+
c_m_n_host_result,
|
| 349 |
+
a_element_op,
|
| 350 |
+
b_element_op,
|
| 351 |
+
c_element_op);
|
| 352 |
+
|
| 353 |
+
ref_invoker.Run(ref_argument);
|
| 354 |
+
}
|
| 355 |
+
|
| 356 |
+
std::string best_op_name;
|
| 357 |
+
std::optional<std::string> best_op_object_name;
|
| 358 |
+
float best_ave_time = 0;
|
| 359 |
+
float best_tflops = 0;
|
| 360 |
+
float best_gb_per_sec = 0;
|
| 361 |
+
float best_kbatch = 0;
|
| 362 |
+
bool pass = true;
|
| 363 |
+
|
| 364 |
+
// profile device GEMM instances
|
| 365 |
+
for(auto& op_ptr : op_ptrs)
|
| 366 |
+
{
|
| 367 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 16, 19, 32, 38}; // use these when KBatch <= 0
|
| 368 |
+
|
| 369 |
+
if(KBatch > 0)
|
| 370 |
+
{
|
| 371 |
+
kbatch_list = {KBatch};
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 375 |
+
{
|
| 376 |
+
auto kbatch_curr = kbatch_list[i];
|
| 377 |
+
|
| 378 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(
|
| 379 |
+
static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 380 |
+
static_cast<XPackedDataType*>(a_scale_device_buf.GetDeviceBuffer()),
|
| 381 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 382 |
+
static_cast<XPackedDataType*>(b_scale_device_buf.GetDeviceBuffer()),
|
| 383 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 384 |
+
M,
|
| 385 |
+
N,
|
| 386 |
+
K,
|
| 387 |
+
StrideA,
|
| 388 |
+
Scale_Stride_AM,
|
| 389 |
+
StrideB,
|
| 390 |
+
Scale_Stride_BN,
|
| 391 |
+
StrideC,
|
| 392 |
+
kbatch_curr,
|
| 393 |
+
a_element_op,
|
| 394 |
+
b_element_op,
|
| 395 |
+
c_element_op);
|
| 396 |
+
|
| 397 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 398 |
+
|
| 399 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 400 |
+
{
|
| 401 |
+
|
| 402 |
+
// re-init C to zero before profiling next kernel
|
| 403 |
+
c_device_buf.SetZero();
|
| 404 |
+
|
| 405 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 406 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 407 |
+
|
| 408 |
+
if(do_verification)
|
| 409 |
+
{
|
| 410 |
+
c_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
| 411 |
+
|
| 412 |
+
if(do_log)
|
| 413 |
+
{
|
| 414 |
+
|
| 415 |
+
if(init_method == 0)
|
| 416 |
+
{
|
| 417 |
+
auto expected = static_cast<float>(K);
|
| 418 |
+
auto computed = type_convert<float>(c_m_n_device_result(0, 12));
|
| 419 |
+
|
| 420 |
+
pass = pass & (std::abs(expected - computed) <= 0.0f);
|
| 421 |
+
std::cout << "\nExpected vs Computed: " << expected << " vs "
|
| 422 |
+
<< computed << ((pass) ? " (PASSED!)" : " (FAILED!)")
|
| 423 |
+
<< std::endl
|
| 424 |
+
<< std::endl;
|
| 425 |
+
}
|
| 426 |
+
else
|
| 427 |
+
{
|
| 428 |
+
if constexpr(is_same_v<ADataType, ck::f8_t> ||
|
| 429 |
+
is_same_v<ADataType, ck::bf8_t>)
|
| 430 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",")
|
| 431 |
+
<< "\n";
|
| 432 |
+
else
|
| 433 |
+
std::cout << "A: WIP PRINT PACKED TYPE\n";
|
| 434 |
+
LogRangeAsType<float>(std::cout << "a_scale : ", a_m_k_scale.mData, ",")
|
| 435 |
+
<< "\n";
|
| 436 |
+
if constexpr(is_same_v<BDataType, ck::f8_t> ||
|
| 437 |
+
is_same_v<BDataType, ck::bf8_t>)
|
| 438 |
+
LogRangeAsType<float>(std::cout << "b : ", b_k_n->mData, ",")
|
| 439 |
+
<< "\n";
|
| 440 |
+
else
|
| 441 |
+
std::cout << "B: WIP PRINT PACKED TYPE\n";
|
| 442 |
+
LogRangeAsType<float>(std::cout << "b_scale: ", b_k_n_scale.mData, ",")
|
| 443 |
+
<< "\n";
|
| 444 |
+
LogRangeAsType<float>(
|
| 445 |
+
std::cout << "c_host : ", c_m_n_host_result.mData, ",")
|
| 446 |
+
<< "\n";
|
| 447 |
+
LogRangeAsType<float>(
|
| 448 |
+
std::cout << "c_device: ", c_m_n_device_result.mData, ",")
|
| 449 |
+
<< std::endl;
|
| 450 |
+
}
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
pass = pass & ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 454 |
+
}
|
| 455 |
+
|
| 456 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 457 |
+
std::optional<std::string> op_obj_name = op_ptr->GetObjectName();
|
| 458 |
+
|
| 459 |
+
float ave_time = invoker_ptr->Run(argument_ptr.get(),
|
| 460 |
+
StreamConfig{nullptr,
|
| 461 |
+
time_kernel,
|
| 462 |
+
0,
|
| 463 |
+
n_warmup,
|
| 464 |
+
n_iter,
|
| 465 |
+
rotating_count > 1,
|
| 466 |
+
rotating_count});
|
| 467 |
+
|
| 468 |
+
// Output size(M*N) * [dot product(2K) + product of scales(K/ScaleBlockSize) +
|
| 469 |
+
// scaling of partial sums(K/ScaleBlockSize)]
|
| 470 |
+
// FLOPS = 2 * M * N * K + 2 * M * N * K / ScaleBlockSize
|
| 471 |
+
std::size_t flop =
|
| 472 |
+
std::size_t(2) * M * N * K + std::size_t(2) * M * N * K / ScaleBlockSize;
|
| 473 |
+
|
| 474 |
+
// TODO: fp6?
|
| 475 |
+
std::size_t num_btype = sizeof(ADataType) * M * K / packed_size_v<ADataType> +
|
| 476 |
+
sizeof(BDataType) * K * N / packed_size_v<BDataType> +
|
| 477 |
+
sizeof(CDataType) * M * N +
|
| 478 |
+
sizeof(XDataType) * (M * K + K * N) / ScaleBlockSize;
|
| 479 |
+
|
| 480 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 481 |
+
|
| 482 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 483 |
+
|
| 484 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 485 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", KBatch "
|
| 486 |
+
<< kbatch_curr << std::endl;
|
| 487 |
+
|
| 488 |
+
if(tflops > best_tflops && ave_time > 1e-10)
|
| 489 |
+
{
|
| 490 |
+
best_op_name = op_name;
|
| 491 |
+
best_op_object_name = op_obj_name;
|
| 492 |
+
best_tflops = tflops;
|
| 493 |
+
best_ave_time = ave_time;
|
| 494 |
+
best_gb_per_sec = gb_per_sec;
|
| 495 |
+
best_kbatch = kbatch_curr;
|
| 496 |
+
}
|
| 497 |
+
}
|
| 498 |
+
else
|
| 499 |
+
{
|
| 500 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 501 |
+
<< std::endl;
|
| 502 |
+
}
|
| 503 |
+
}
|
| 504 |
+
}
|
| 505 |
+
|
| 506 |
+
if constexpr(is_same<CDataType, float>::value)
|
| 507 |
+
{
|
| 508 |
+
std::cout << "Best Perf for datatype = f32";
|
| 509 |
+
}
|
| 510 |
+
else if constexpr(is_same<CDataType, half_t>::value)
|
| 511 |
+
{
|
| 512 |
+
std::cout << "Best Perf for datatype = f16";
|
| 513 |
+
}
|
| 514 |
+
else if constexpr(is_same<CDataType, bhalf_t>::value)
|
| 515 |
+
{
|
| 516 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 517 |
+
}
|
| 518 |
+
std::cout << " ALayout = " << ALayout::name;
|
| 519 |
+
std::cout << " BLayout = " << BLayout::name;
|
| 520 |
+
std::cout << " CLayout = " << CLayout::name;
|
| 521 |
+
|
| 522 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 523 |
+
<< " StrideB = " << StrideB << " StrideC = " << StrideC << " KBatch = " << best_kbatch
|
| 524 |
+
<< " : " << best_ave_time << " ms, " << best_tflops << " TFlops, " << best_gb_per_sec
|
| 525 |
+
<< " GB/s, " << best_op_name << std::endl;
|
| 526 |
+
|
| 527 |
+
if(best_op_object_name)
|
| 528 |
+
std::cout << best_op_object_name.value() << std::endl;
|
| 529 |
+
|
| 530 |
+
return pass;
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
} // namespace profiler
|
| 534 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_reduce_impl.hpp
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include "ck/ck.hpp"
|
| 7 |
+
#include "ck/utility/reduction_operator.hpp"
|
| 8 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_reduce.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 11 |
+
|
| 12 |
+
#include "ck/library/utility/check_err.hpp"
|
| 13 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 14 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 15 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 16 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 17 |
+
#include "ck/library/utility/literals.hpp"
|
| 18 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 19 |
+
|
| 20 |
+
namespace ck {
|
| 21 |
+
namespace tensor_operation {
|
| 22 |
+
namespace device {
|
| 23 |
+
namespace instance {
|
| 24 |
+
|
| 25 |
+
using F32 = float;
|
| 26 |
+
using F16 = ck::half_t;
|
| 27 |
+
using ReducePtrsGlobal = ck::Tuple<F32*, F32*>;
|
| 28 |
+
using Div = ck::tensor_operation::element_wise::UnaryDivide;
|
| 29 |
+
using Identity = ck::tensor_operation::element_wise::PassThrough;
|
| 30 |
+
using Square = ck::tensor_operation::element_wise::UnarySquare;
|
| 31 |
+
using ReduceInElementOps = ck::Tuple<Identity, Square>;
|
| 32 |
+
using ReduceOutElementOps = ck::Tuple<Div, Div>;
|
| 33 |
+
|
| 34 |
+
using DeviceGemmReduceNoOpPtr =
|
| 35 |
+
ck::tensor_operation::device::DeviceGemmReducePtr<0, ReducePtrsGlobal::Size()>;
|
| 36 |
+
|
| 37 |
+
void add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_mk_kn_mn_instances(
|
| 38 |
+
std::vector<DeviceGemmReduceNoOpPtr>&);
|
| 39 |
+
|
| 40 |
+
void add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_mk_nk_mn_instances(
|
| 41 |
+
std::vector<DeviceGemmReduceNoOpPtr>&);
|
| 42 |
+
|
| 43 |
+
void add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_km_kn_mn_instances(
|
| 44 |
+
std::vector<DeviceGemmReduceNoOpPtr>&);
|
| 45 |
+
|
| 46 |
+
void add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_km_nk_mn_instances(
|
| 47 |
+
std::vector<DeviceGemmReduceNoOpPtr>&);
|
| 48 |
+
|
| 49 |
+
} // namespace instance
|
| 50 |
+
} // namespace device
|
| 51 |
+
} // namespace tensor_operation
|
| 52 |
+
} // namespace ck
|
| 53 |
+
|
| 54 |
+
namespace ck {
|
| 55 |
+
namespace profiler {
|
| 56 |
+
|
| 57 |
+
template <typename ADataType,
|
| 58 |
+
typename BDataType,
|
| 59 |
+
typename CDataType,
|
| 60 |
+
typename ReduceDataType,
|
| 61 |
+
typename ALayout,
|
| 62 |
+
typename BLayout,
|
| 63 |
+
typename CLayout>
|
| 64 |
+
bool profile_gemm_reduce_impl(int do_verification,
|
| 65 |
+
int init_method,
|
| 66 |
+
bool do_log,
|
| 67 |
+
bool time_kernel,
|
| 68 |
+
int M,
|
| 69 |
+
int N,
|
| 70 |
+
int K,
|
| 71 |
+
int StrideA,
|
| 72 |
+
int StrideB,
|
| 73 |
+
int StrideC)
|
| 74 |
+
{
|
| 75 |
+
bool pass = true;
|
| 76 |
+
|
| 77 |
+
auto f_host_tensor_descriptor =
|
| 78 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 79 |
+
using namespace ck::literals;
|
| 80 |
+
|
| 81 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 82 |
+
{
|
| 83 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 84 |
+
}
|
| 85 |
+
else
|
| 86 |
+
{
|
| 87 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 88 |
+
}
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 92 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 93 |
+
|
| 94 |
+
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 95 |
+
Tensor<ReduceDataType> reduce0_m_host_result({M});
|
| 96 |
+
Tensor<ReduceDataType> reduce1_m_host_result({M});
|
| 97 |
+
|
| 98 |
+
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 99 |
+
Tensor<ReduceDataType> reduce0_m_device_result({M});
|
| 100 |
+
Tensor<ReduceDataType> reduce1_m_device_result({M});
|
| 101 |
+
|
| 102 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 103 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 104 |
+
std::cout << "c_m_n: " << c_m_n_host_result.mDesc << std::endl;
|
| 105 |
+
std::cout << "reduce0_m: " << reduce0_m_host_result.mDesc << std::endl;
|
| 106 |
+
std::cout << "reduce1_m: " << reduce1_m_host_result.mDesc << std::endl;
|
| 107 |
+
|
| 108 |
+
std::size_t num_thread = 1;
|
| 109 |
+
switch(init_method)
|
| 110 |
+
{
|
| 111 |
+
case 0: break;
|
| 112 |
+
case 1:
|
| 113 |
+
std::srand(0);
|
| 114 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
|
| 115 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
|
| 116 |
+
break;
|
| 117 |
+
default:
|
| 118 |
+
std::srand(0);
|
| 119 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
|
| 120 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 124 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 125 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 126 |
+
using ReduceOp0 = ck::reduce::Add;
|
| 127 |
+
using ReduceOp1 = ck::reduce::Add;
|
| 128 |
+
using UnaryIdenticElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 129 |
+
using UnarySquareElementOp = ck::tensor_operation::element_wise::UnarySquare;
|
| 130 |
+
using UnaryDivElementOp = ck::tensor_operation::element_wise::UnaryDivide;
|
| 131 |
+
|
| 132 |
+
auto a_element_op = AElementOp{};
|
| 133 |
+
auto b_element_op = BElementOp{};
|
| 134 |
+
auto c_element_op = CElementOp{};
|
| 135 |
+
std::array<void*, 3> gemm_element_ops = {&a_element_op, &b_element_op, &c_element_op};
|
| 136 |
+
|
| 137 |
+
const auto reduce0_op = ReduceOp0{};
|
| 138 |
+
const auto reduce1_op = ReduceOp1{};
|
| 139 |
+
|
| 140 |
+
auto passthrough = UnaryIdenticElementOp{};
|
| 141 |
+
auto square = UnarySquareElementOp{};
|
| 142 |
+
auto div = UnaryDivElementOp{N};
|
| 143 |
+
std::array<void*, 2> reduce_in_element_ops = {&passthrough, &square};
|
| 144 |
+
std::array<void*, 2> reduce_out_element_ops = {&div, &div};
|
| 145 |
+
|
| 146 |
+
if(do_verification)
|
| 147 |
+
{
|
| 148 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 149 |
+
BDataType,
|
| 150 |
+
CDataType,
|
| 151 |
+
ReduceDataType,
|
| 152 |
+
AElementOp,
|
| 153 |
+
BElementOp,
|
| 154 |
+
CElementOp>;
|
| 155 |
+
|
| 156 |
+
using ReduceAccDataType = ReduceDataType;
|
| 157 |
+
|
| 158 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 159 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 160 |
+
|
| 161 |
+
auto ref_argument = ref_gemm.MakeArgument(
|
| 162 |
+
a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);
|
| 163 |
+
|
| 164 |
+
ref_invoker.Run(ref_argument);
|
| 165 |
+
|
| 166 |
+
for(int m = 0; m < M; ++m)
|
| 167 |
+
{
|
| 168 |
+
auto reduce0_acc = reduce0_op.GetIdentityValue<ReduceAccDataType>();
|
| 169 |
+
auto reduce1_acc = reduce1_op.GetIdentityValue<ReduceAccDataType>();
|
| 170 |
+
|
| 171 |
+
for(int n = 0; n < N; ++n)
|
| 172 |
+
{
|
| 173 |
+
ReduceAccDataType d0_val =
|
| 174 |
+
ck::type_convert<ReduceAccDataType>(c_m_n_host_result(m, n));
|
| 175 |
+
ReduceAccDataType d1_val;
|
| 176 |
+
|
| 177 |
+
square(d1_val, d0_val);
|
| 178 |
+
reduce0_op(reduce0_acc, d0_val);
|
| 179 |
+
reduce1_op(reduce1_acc, d1_val);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
div(reduce0_acc, reduce0_acc);
|
| 183 |
+
div(reduce1_acc, reduce1_acc);
|
| 184 |
+
reduce0_m_host_result(m) = ck::type_convert<ReduceDataType>(reduce0_acc);
|
| 185 |
+
reduce1_m_host_result(m) = ck::type_convert<ReduceDataType>(reduce1_acc);
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 190 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 191 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 192 |
+
DeviceMem reduce0_device_buf(sizeof(ReduceDataType) *
|
| 193 |
+
reduce0_m_device_result.mDesc.GetElementSpaceSize());
|
| 194 |
+
DeviceMem reduce1_device_buf(sizeof(ReduceDataType) *
|
| 195 |
+
reduce1_m_device_result.mDesc.GetElementSpaceSize());
|
| 196 |
+
|
| 197 |
+
std::array<void*, 2> p_reduces = {reduce0_device_buf.GetDeviceBuffer(),
|
| 198 |
+
reduce1_device_buf.GetDeviceBuffer()};
|
| 199 |
+
|
| 200 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 201 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 202 |
+
|
| 203 |
+
// add device GEMM instances
|
| 204 |
+
std::vector<ck::tensor_operation::device::instance::DeviceGemmReduceNoOpPtr> gemm_ptrs;
|
| 205 |
+
|
| 206 |
+
if constexpr(is_same<ADataType, half_t>::value && is_same<BDataType, half_t>::value &&
|
| 207 |
+
is_same<CDataType, half_t>::value)
|
| 208 |
+
{
|
| 209 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
|
| 210 |
+
is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
|
| 211 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 212 |
+
{
|
| 213 |
+
ck::tensor_operation::device::instance::
|
| 214 |
+
add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_mk_kn_mn_instances(
|
| 215 |
+
gemm_ptrs);
|
| 216 |
+
}
|
| 217 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value &&
|
| 218 |
+
is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 219 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 220 |
+
{
|
| 221 |
+
ck::tensor_operation::device::instance::
|
| 222 |
+
add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_mk_nk_mn_instances(
|
| 223 |
+
gemm_ptrs);
|
| 224 |
+
}
|
| 225 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 226 |
+
is_same<BLayout, tensor_layout::gemm::RowMajor>::value &&
|
| 227 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 228 |
+
{
|
| 229 |
+
ck::tensor_operation::device::instance::
|
| 230 |
+
add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_km_kn_mn_instances(
|
| 231 |
+
gemm_ptrs);
|
| 232 |
+
}
|
| 233 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 234 |
+
is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value &&
|
| 235 |
+
is_same<CLayout, tensor_layout::gemm::RowMajor>::value)
|
| 236 |
+
{
|
| 237 |
+
ck::tensor_operation::device::instance::
|
| 238 |
+
add_device_gemm_reduce_xdl_cshuffle_f16_f16_f16_f32_f32_km_nk_mn_instances(
|
| 239 |
+
gemm_ptrs);
|
| 240 |
+
}
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
if(gemm_ptrs.size() <= 0)
|
| 244 |
+
{
|
| 245 |
+
throw std::runtime_error("wrong! no device GEMM instance found");
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
std::string best_gemm_name;
|
| 249 |
+
float best_ave_time = 0;
|
| 250 |
+
float best_tflops = 0;
|
| 251 |
+
float best_gb_per_sec = 0;
|
| 252 |
+
|
| 253 |
+
// profile device GEMM instances
|
| 254 |
+
for(auto& gemm_ptr : gemm_ptrs)
|
| 255 |
+
{
|
| 256 |
+
auto argument_ptr = gemm_ptr->MakeArgumentPointer(a_device_buf.GetDeviceBuffer(),
|
| 257 |
+
b_device_buf.GetDeviceBuffer(),
|
| 258 |
+
nullptr,
|
| 259 |
+
{},
|
| 260 |
+
c_device_buf.GetDeviceBuffer(),
|
| 261 |
+
p_reduces,
|
| 262 |
+
M,
|
| 263 |
+
N,
|
| 264 |
+
K,
|
| 265 |
+
StrideA,
|
| 266 |
+
StrideB,
|
| 267 |
+
StrideC,
|
| 268 |
+
{},
|
| 269 |
+
gemm_element_ops,
|
| 270 |
+
{},
|
| 271 |
+
reduce_in_element_ops,
|
| 272 |
+
reduce_out_element_ops);
|
| 273 |
+
|
| 274 |
+
auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
|
| 275 |
+
|
| 276 |
+
if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 277 |
+
{
|
| 278 |
+
// init DO, D1 to 0
|
| 279 |
+
reduce0_device_buf.SetZero();
|
| 280 |
+
reduce1_device_buf.SetZero();
|
| 281 |
+
|
| 282 |
+
float ave_time =
|
| 283 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 284 |
+
|
| 285 |
+
std::string gemm_name = gemm_ptr->GetTypeString();
|
| 286 |
+
|
| 287 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 288 |
+
|
| 289 |
+
std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 290 |
+
sizeof(CDataType) * M * N + sizeof(CDataType) * N;
|
| 291 |
+
|
| 292 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 293 |
+
|
| 294 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 295 |
+
|
| 296 |
+
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 297 |
+
<< " GB/s, " << gemm_name << std::endl;
|
| 298 |
+
|
| 299 |
+
if(tflops > best_tflops)
|
| 300 |
+
{
|
| 301 |
+
best_gemm_name = gemm_name;
|
| 302 |
+
best_tflops = tflops;
|
| 303 |
+
best_ave_time = ave_time;
|
| 304 |
+
best_gb_per_sec = gb_per_sec;
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
if(do_verification)
|
| 308 |
+
{
|
| 309 |
+
c_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
| 310 |
+
reduce0_device_buf.FromDevice(reduce0_m_device_result.mData.data());
|
| 311 |
+
reduce1_device_buf.FromDevice(reduce1_m_device_result.mData.data());
|
| 312 |
+
|
| 313 |
+
ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 314 |
+
ck::utils::check_err(reduce0_m_device_result, reduce0_m_host_result);
|
| 315 |
+
ck::utils::check_err(reduce1_m_device_result, reduce1_m_host_result);
|
| 316 |
+
|
| 317 |
+
if(do_log)
|
| 318 |
+
{
|
| 319 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",") << std::endl;
|
| 320 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
|
| 321 |
+
LogRangeAsType<float>(std::cout << "c_host: ", c_m_n_host_result.mData, ",")
|
| 322 |
+
<< std::endl;
|
| 323 |
+
LogRangeAsType<float>(std::cout << "c_device: ", c_m_n_device_result.mData, ",")
|
| 324 |
+
<< std::endl;
|
| 325 |
+
LogRangeAsType<float>(
|
| 326 |
+
std::cout << "d0_host: ", reduce0_m_host_result.mData, ",")
|
| 327 |
+
<< std::endl;
|
| 328 |
+
LogRangeAsType<float>(
|
| 329 |
+
std::cout << "d0_device: ", reduce0_m_device_result.mData, ",")
|
| 330 |
+
<< std::endl;
|
| 331 |
+
LogRangeAsType<float>(
|
| 332 |
+
std::cout << "d1_host: ", reduce1_m_host_result.mData, ",")
|
| 333 |
+
<< std::endl;
|
| 334 |
+
LogRangeAsType<float>(
|
| 335 |
+
std::cout << "d1_device: ", reduce1_m_device_result.mData, ",")
|
| 336 |
+
<< std::endl;
|
| 337 |
+
}
|
| 338 |
+
}
|
| 339 |
+
}
|
| 340 |
+
else
|
| 341 |
+
{
|
| 342 |
+
std::cout << "does not support this GEMM problem" << std::endl;
|
| 343 |
+
}
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 347 |
+
<< best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl;
|
| 348 |
+
|
| 349 |
+
return pass;
|
| 350 |
+
}
|
| 351 |
+
|
| 352 |
+
} // namespace profiler
|
| 353 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_splitk_impl.hpp
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_splitk.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_splitk.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename ADataType,
|
| 28 |
+
typename BDataType,
|
| 29 |
+
typename AccDataType,
|
| 30 |
+
typename CDataType,
|
| 31 |
+
typename ALayout,
|
| 32 |
+
typename BLayout,
|
| 33 |
+
typename CLayout,
|
| 34 |
+
typename ComputeType = CDataType>
|
| 35 |
+
bool profile_gemm_splitk_impl(int do_verification,
|
| 36 |
+
int init_method,
|
| 37 |
+
bool do_log,
|
| 38 |
+
bool time_kernel,
|
| 39 |
+
int M,
|
| 40 |
+
int N,
|
| 41 |
+
int K,
|
| 42 |
+
int StrideA,
|
| 43 |
+
int StrideB,
|
| 44 |
+
int StrideC,
|
| 45 |
+
int KBatch,
|
| 46 |
+
int n_warmup,
|
| 47 |
+
int n_iter)
|
| 48 |
+
{
|
| 49 |
+
bool pass = true;
|
| 50 |
+
|
| 51 |
+
auto f_host_tensor_descriptor =
|
| 52 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 53 |
+
using namespace ck::literals;
|
| 54 |
+
|
| 55 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 56 |
+
{
|
| 57 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 58 |
+
}
|
| 59 |
+
else
|
| 60 |
+
{
|
| 61 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 62 |
+
}
|
| 63 |
+
};
|
| 64 |
+
|
| 65 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 66 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 67 |
+
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 68 |
+
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 69 |
+
|
| 70 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 71 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 72 |
+
std::cout << "c_m_n: " << c_m_n_device_result.mDesc << std::endl;
|
| 73 |
+
|
| 74 |
+
switch(init_method)
|
| 75 |
+
{
|
| 76 |
+
case 0: break;
|
| 77 |
+
case 1:
|
| 78 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-1, 2});
|
| 79 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-1, 2});
|
| 80 |
+
break;
|
| 81 |
+
default:
|
| 82 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 83 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 87 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 88 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 89 |
+
|
| 90 |
+
const auto a_element_op = AElementOp{};
|
| 91 |
+
const auto b_element_op = BElementOp{};
|
| 92 |
+
const auto c_element_op = CElementOp{};
|
| 93 |
+
|
| 94 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 95 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 96 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 97 |
+
|
| 98 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 99 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 100 |
+
|
| 101 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmSplitK<ALayout,
|
| 102 |
+
BLayout,
|
| 103 |
+
CLayout,
|
| 104 |
+
ADataType,
|
| 105 |
+
BDataType,
|
| 106 |
+
CDataType,
|
| 107 |
+
AElementOp,
|
| 108 |
+
BElementOp,
|
| 109 |
+
CElementOp,
|
| 110 |
+
ComputeType>;
|
| 111 |
+
|
| 112 |
+
// get device op instances
|
| 113 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 114 |
+
DeviceOp>::GetInstances();
|
| 115 |
+
|
| 116 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 117 |
+
|
| 118 |
+
// Run reference GEMM
|
| 119 |
+
if(do_verification)
|
| 120 |
+
{
|
| 121 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 122 |
+
BDataType,
|
| 123 |
+
CDataType,
|
| 124 |
+
AccDataType,
|
| 125 |
+
AElementOp,
|
| 126 |
+
BElementOp,
|
| 127 |
+
CElementOp,
|
| 128 |
+
ComputeType>;
|
| 129 |
+
|
| 130 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 131 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 132 |
+
|
| 133 |
+
auto ref_argument = ref_gemm.MakeArgument(
|
| 134 |
+
a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);
|
| 135 |
+
|
| 136 |
+
ref_invoker.Run(ref_argument);
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
std::string best_op_name;
|
| 140 |
+
float best_ave_time = 0;
|
| 141 |
+
float best_tflops = 0;
|
| 142 |
+
float best_gb_per_sec = 0;
|
| 143 |
+
float best_kbatch = 0;
|
| 144 |
+
|
| 145 |
+
// profile device GEMM instances
|
| 146 |
+
for(auto& op_ptr : op_ptrs)
|
| 147 |
+
{
|
| 148 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 12, 16, 19, 20, 32, 38};
|
| 149 |
+
|
| 150 |
+
if(KBatch > 0)
|
| 151 |
+
{
|
| 152 |
+
kbatch_list = {KBatch};
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 156 |
+
{
|
| 157 |
+
auto kbatch_curr = kbatch_list[i];
|
| 158 |
+
|
| 159 |
+
auto argument_ptr =
|
| 160 |
+
op_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 161 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 162 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 163 |
+
M,
|
| 164 |
+
N,
|
| 165 |
+
K,
|
| 166 |
+
StrideA,
|
| 167 |
+
StrideB,
|
| 168 |
+
StrideC,
|
| 169 |
+
a_element_op,
|
| 170 |
+
b_element_op,
|
| 171 |
+
c_element_op,
|
| 172 |
+
kbatch_curr);
|
| 173 |
+
|
| 174 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 175 |
+
|
| 176 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 177 |
+
{
|
| 178 |
+
|
| 179 |
+
// re-init C to zero before profiling next kernel
|
| 180 |
+
c_device_buf.SetZero();
|
| 181 |
+
|
| 182 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 183 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 184 |
+
|
| 185 |
+
if(do_verification)
|
| 186 |
+
{
|
| 187 |
+
c_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
| 188 |
+
|
| 189 |
+
pass = pass & ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 190 |
+
|
| 191 |
+
if(do_log)
|
| 192 |
+
{
|
| 193 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",") << std::endl;
|
| 194 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
|
| 195 |
+
LogRangeAsType<float>(
|
| 196 |
+
std::cout << "c_host : ", c_m_n_host_result.mData, ",")
|
| 197 |
+
<< std::endl;
|
| 198 |
+
LogRangeAsType<float>(
|
| 199 |
+
std::cout << "c_device: ", c_m_n_device_result.mData, ",")
|
| 200 |
+
<< std::endl;
|
| 201 |
+
}
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 205 |
+
|
| 206 |
+
float ave_time = invoker_ptr->Run(
|
| 207 |
+
argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});
|
| 208 |
+
|
| 209 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 210 |
+
|
| 211 |
+
std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 212 |
+
sizeof(CDataType) * M * N;
|
| 213 |
+
|
| 214 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 215 |
+
|
| 216 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 217 |
+
|
| 218 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 219 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", KBatch "
|
| 220 |
+
<< kbatch_curr << std::endl;
|
| 221 |
+
|
| 222 |
+
#if defined CK_ENABLE_FP8
|
| 223 |
+
// set softer tolerances for fp8
|
| 224 |
+
if constexpr(is_same_v<ADataType, f8_t> || is_same_v<BDataType, f8_t> ||
|
| 225 |
+
is_same_v<CDataType, f8_t>)
|
| 226 |
+
{
|
| 227 |
+
std::string msg = "Error: Incorrect results!";
|
| 228 |
+
double rtol = 1e-1;
|
| 229 |
+
double atol = 1e-1;
|
| 230 |
+
pass = pass & ck::utils::check_err(
|
| 231 |
+
c_m_n_device_result, c_m_n_host_result, msg, rtol, atol);
|
| 232 |
+
}
|
| 233 |
+
else
|
| 234 |
+
{
|
| 235 |
+
#endif
|
| 236 |
+
pass = pass & ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 237 |
+
#if defined CK_ENABLE_FP8
|
| 238 |
+
}
|
| 239 |
+
#endif
|
| 240 |
+
|
| 241 |
+
if(tflops > best_tflops)
|
| 242 |
+
{
|
| 243 |
+
best_op_name = op_name;
|
| 244 |
+
best_tflops = tflops;
|
| 245 |
+
best_ave_time = ave_time;
|
| 246 |
+
best_gb_per_sec = gb_per_sec;
|
| 247 |
+
best_kbatch = kbatch_curr;
|
| 248 |
+
}
|
| 249 |
+
}
|
| 250 |
+
else
|
| 251 |
+
{
|
| 252 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 253 |
+
<< std::endl;
|
| 254 |
+
}
|
| 255 |
+
}
|
| 256 |
+
}
|
| 257 |
+
|
| 258 |
+
if constexpr(is_same<CDataType, float>::value)
|
| 259 |
+
{
|
| 260 |
+
std::cout << "Best Perf for datatype = f32";
|
| 261 |
+
}
|
| 262 |
+
else if constexpr(is_same<CDataType, half_t>::value)
|
| 263 |
+
{
|
| 264 |
+
std::cout << "Best Perf for datatype = f16";
|
| 265 |
+
}
|
| 266 |
+
else if constexpr(is_same<CDataType, bhalf_t>::value)
|
| 267 |
+
{
|
| 268 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 269 |
+
}
|
| 270 |
+
else if constexpr(is_same<CDataType, int8_t>::value)
|
| 271 |
+
{
|
| 272 |
+
std::cout << "Best Perf for datatype = int8";
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 276 |
+
{
|
| 277 |
+
std::cout << " ALayout = RowMajor";
|
| 278 |
+
}
|
| 279 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 280 |
+
{
|
| 281 |
+
std::cout << " ALayout = ColumnMajor";
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 285 |
+
{
|
| 286 |
+
std::cout << " BLayout = RowMajor";
|
| 287 |
+
}
|
| 288 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 289 |
+
{
|
| 290 |
+
std::cout << " BLayout = ColumnMajor";
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 294 |
+
<< " StrideB = " << StrideB << " StrideC = " << StrideC << " KBatch = " << best_kbatch
|
| 295 |
+
<< " : " << best_ave_time << " ms, " << best_tflops << " TFlops, " << best_gb_per_sec
|
| 296 |
+
<< " GB/s, " << best_op_name << std::endl;
|
| 297 |
+
|
| 298 |
+
return pass;
|
| 299 |
+
}
|
| 300 |
+
|
| 301 |
+
} // namespace profiler
|
| 302 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_universal_batched_impl.hpp
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <memory>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/device_batched_gemm.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/device_batched_gemm_multi_d.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 13 |
+
|
| 14 |
+
#include "ck/library/tensor_operation_instance/gpu/batched_gemm.hpp"
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/batched_gemm_multi_d.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_batched_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename ADataType,
|
| 28 |
+
typename BDataType,
|
| 29 |
+
typename CDataType,
|
| 30 |
+
typename ALayout,
|
| 31 |
+
typename BLayout,
|
| 32 |
+
typename CLayout,
|
| 33 |
+
typename AElementOp,
|
| 34 |
+
typename BElementOp,
|
| 35 |
+
typename CElementOp,
|
| 36 |
+
typename DeviceOp>
|
| 37 |
+
bool profile_gemm_universal_batched_impl(int do_verification,
|
| 38 |
+
int init_method,
|
| 39 |
+
bool do_log,
|
| 40 |
+
bool time_kernel,
|
| 41 |
+
int M,
|
| 42 |
+
int N,
|
| 43 |
+
int K,
|
| 44 |
+
int BatchStrideA,
|
| 45 |
+
int BatchStrideB,
|
| 46 |
+
int BatchStrideC,
|
| 47 |
+
int StrideA,
|
| 48 |
+
int StrideB,
|
| 49 |
+
int StrideC,
|
| 50 |
+
int BatchCount,
|
| 51 |
+
int KBatch,
|
| 52 |
+
int n_warmup,
|
| 53 |
+
int n_iter,
|
| 54 |
+
uint64_t rotating = 0)
|
| 55 |
+
{
|
| 56 |
+
bool pass = true;
|
| 57 |
+
|
| 58 |
+
auto f_host_tensor_descriptor = [](std::size_t batch_count,
|
| 59 |
+
std::size_t row,
|
| 60 |
+
std::size_t col,
|
| 61 |
+
std::size_t stride,
|
| 62 |
+
std::size_t batch_stride,
|
| 63 |
+
auto layout) {
|
| 64 |
+
using namespace ck::literals;
|
| 65 |
+
|
| 66 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 67 |
+
{
|
| 68 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, stride, 1_uz});
|
| 69 |
+
}
|
| 70 |
+
else
|
| 71 |
+
{
|
| 72 |
+
return HostTensorDescriptor({batch_count, row, col}, {batch_stride, 1_uz, stride});
|
| 73 |
+
}
|
| 74 |
+
};
|
| 75 |
+
|
| 76 |
+
Tensor<ADataType> a_g_m_k(
|
| 77 |
+
f_host_tensor_descriptor(BatchCount, M, K, StrideA, BatchStrideA, ALayout{}));
|
| 78 |
+
Tensor<BDataType> b_g_k_n(
|
| 79 |
+
f_host_tensor_descriptor(BatchCount, K, N, StrideB, BatchStrideB, BLayout{}));
|
| 80 |
+
Tensor<CDataType> c_g_m_n_host_result(
|
| 81 |
+
f_host_tensor_descriptor(BatchCount, M, N, StrideC, BatchStrideC, CLayout{}));
|
| 82 |
+
Tensor<CDataType> c_g_m_n_device_result(
|
| 83 |
+
f_host_tensor_descriptor(BatchCount, M, N, StrideC, BatchStrideC, CLayout{}));
|
| 84 |
+
|
| 85 |
+
int total_gemm_needed =
|
| 86 |
+
a_g_m_k.GetElementSpaceSizeInBytes() + b_g_k_n.GetElementSpaceSizeInBytes();
|
| 87 |
+
int rotating_count = std::max(
|
| 88 |
+
1,
|
| 89 |
+
std::min(n_iter,
|
| 90 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 91 |
+
|
| 92 |
+
std::cout << "a_g_m_k: " << a_g_m_k.mDesc << std::endl;
|
| 93 |
+
std::cout << "b_g_k_n: " << b_g_k_n.mDesc << std::endl;
|
| 94 |
+
std::cout << "c_g_m_n: " << c_g_m_n_host_result.mDesc << std::endl;
|
| 95 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 96 |
+
|
| 97 |
+
switch(init_method)
|
| 98 |
+
{
|
| 99 |
+
case 0: break;
|
| 100 |
+
case 1:
|
| 101 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
|
| 102 |
+
b_g_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
|
| 103 |
+
break;
|
| 104 |
+
default:
|
| 105 |
+
a_g_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 106 |
+
b_g_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
const auto a_element_op = AElementOp{};
|
| 110 |
+
const auto b_element_op = BElementOp{};
|
| 111 |
+
const auto c_element_op = CElementOp{};
|
| 112 |
+
|
| 113 |
+
if(do_verification)
|
| 114 |
+
{
|
| 115 |
+
using ReferenceBatchedGemmInstance =
|
| 116 |
+
ck::tensor_operation::host::ReferenceBatchedGemm<ADataType,
|
| 117 |
+
BDataType,
|
| 118 |
+
CDataType,
|
| 119 |
+
float,
|
| 120 |
+
AElementOp,
|
| 121 |
+
BElementOp,
|
| 122 |
+
CElementOp>;
|
| 123 |
+
|
| 124 |
+
auto ref_batched_gemm = ReferenceBatchedGemmInstance{};
|
| 125 |
+
auto ref_invoker = ref_batched_gemm.MakeInvoker();
|
| 126 |
+
|
| 127 |
+
auto ref_argument = ref_batched_gemm.MakeArgument(
|
| 128 |
+
a_g_m_k, b_g_k_n, c_g_m_n_host_result, a_element_op, b_element_op, c_element_op);
|
| 129 |
+
|
| 130 |
+
ref_invoker.Run(ref_argument);
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_g_m_k.mDesc.GetElementSpaceSize());
|
| 134 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_g_k_n.mDesc.GetElementSpaceSize());
|
| 135 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_g_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 136 |
+
|
| 137 |
+
a_device_buf.ToDevice(a_g_m_k.mData.data());
|
| 138 |
+
b_device_buf.ToDevice(b_g_k_n.mData.data());
|
| 139 |
+
c_device_buf.ToDevice(c_g_m_n_device_result.mData.data());
|
| 140 |
+
|
| 141 |
+
// get device op instances
|
| 142 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 143 |
+
DeviceOp>::GetInstances();
|
| 144 |
+
|
| 145 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 146 |
+
|
| 147 |
+
std::string best_op_name;
|
| 148 |
+
float best_ave_time = 0;
|
| 149 |
+
float best_tflops = 0;
|
| 150 |
+
float best_gb_per_sec = 0;
|
| 151 |
+
float best_kbatch = 0;
|
| 152 |
+
|
| 153 |
+
// profile device op instances
|
| 154 |
+
for(auto& op_ptr : op_ptrs)
|
| 155 |
+
{
|
| 156 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 16, 19, 32, 38};
|
| 157 |
+
|
| 158 |
+
if(KBatch > 0)
|
| 159 |
+
{
|
| 160 |
+
kbatch_list = {KBatch};
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 164 |
+
{
|
| 165 |
+
auto kbatch_curr = kbatch_list[i];
|
| 166 |
+
|
| 167 |
+
auto argument_ptr =
|
| 168 |
+
op_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 169 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 170 |
+
{},
|
| 171 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 172 |
+
M,
|
| 173 |
+
N,
|
| 174 |
+
K,
|
| 175 |
+
BatchCount,
|
| 176 |
+
StrideA,
|
| 177 |
+
StrideB,
|
| 178 |
+
{},
|
| 179 |
+
StrideC,
|
| 180 |
+
BatchStrideA,
|
| 181 |
+
BatchStrideB,
|
| 182 |
+
{},
|
| 183 |
+
BatchStrideC,
|
| 184 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 185 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 186 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 187 |
+
kbatch_curr);
|
| 188 |
+
|
| 189 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 190 |
+
|
| 191 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 192 |
+
{
|
| 193 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 194 |
+
|
| 195 |
+
float ave_time = invoker_ptr->Run(
|
| 196 |
+
argument_ptr.get(),
|
| 197 |
+
StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter, true, rotating_count});
|
| 198 |
+
|
| 199 |
+
std::size_t flop = std::size_t(2) * BatchCount * M * N * K;
|
| 200 |
+
|
| 201 |
+
std::size_t num_btype = (sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 202 |
+
sizeof(CDataType) * M * N) *
|
| 203 |
+
BatchCount;
|
| 204 |
+
|
| 205 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 206 |
+
|
| 207 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 208 |
+
|
| 209 |
+
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
|
| 210 |
+
<< " GB/s, " << op_name << ", KBatch " << kbatch_curr << std::endl;
|
| 211 |
+
|
| 212 |
+
if(tflops > best_tflops)
|
| 213 |
+
{
|
| 214 |
+
best_op_name = op_name;
|
| 215 |
+
best_tflops = tflops;
|
| 216 |
+
best_ave_time = ave_time;
|
| 217 |
+
best_gb_per_sec = gb_per_sec;
|
| 218 |
+
best_kbatch = kbatch_curr;
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
if(do_verification)
|
| 222 |
+
{
|
| 223 |
+
c_device_buf.FromDevice(c_g_m_n_device_result.mData.data());
|
| 224 |
+
|
| 225 |
+
pass = pass & ck::utils::check_err(c_g_m_n_device_result, c_g_m_n_host_result);
|
| 226 |
+
|
| 227 |
+
if(do_log)
|
| 228 |
+
{
|
| 229 |
+
LogRangeAsType<float>(std::cout << "a : ", a_g_m_k.mData, ",") << std::endl;
|
| 230 |
+
LogRangeAsType<float>(std::cout << "b: ", b_g_k_n.mData, ",") << std::endl;
|
| 231 |
+
LogRangeAsType<float>(
|
| 232 |
+
std::cout << "c_host: ", c_g_m_n_host_result.mData, ",")
|
| 233 |
+
<< std::endl;
|
| 234 |
+
LogRangeAsType<float>(
|
| 235 |
+
std::cout << "c_device: ", c_g_m_n_device_result.mData, ",")
|
| 236 |
+
<< std::endl;
|
| 237 |
+
}
|
| 238 |
+
}
|
| 239 |
+
}
|
| 240 |
+
else
|
| 241 |
+
{
|
| 242 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 243 |
+
<< std::endl;
|
| 244 |
+
}
|
| 245 |
+
}
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
if constexpr(is_same<CDataType, float>::value)
|
| 249 |
+
{
|
| 250 |
+
std::cout << "Best Perf for datatype = f32";
|
| 251 |
+
}
|
| 252 |
+
else if constexpr(is_same<CDataType, half_t>::value)
|
| 253 |
+
{
|
| 254 |
+
std::cout << "Best Perf for datatype = f16";
|
| 255 |
+
}
|
| 256 |
+
else if constexpr(is_same<CDataType, bhalf_t>::value)
|
| 257 |
+
{
|
| 258 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 259 |
+
}
|
| 260 |
+
else if constexpr(is_same<CDataType, int8_t>::value)
|
| 261 |
+
{
|
| 262 |
+
std::cout << "Best Perf for datatype = int8";
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 266 |
+
{
|
| 267 |
+
std::cout << " ALayout = RowMajor";
|
| 268 |
+
}
|
| 269 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 270 |
+
{
|
| 271 |
+
std::cout << " ALayout = ColumnMajor";
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 275 |
+
{
|
| 276 |
+
std::cout << " BLayout = RowMajor";
|
| 277 |
+
}
|
| 278 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 279 |
+
{
|
| 280 |
+
std::cout << " BLayout = ColumnMajor";
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
std::cout << " B = " << BatchCount << " M = " << M << " N = " << N << " K = " << K
|
| 284 |
+
<< " StrideA = " << StrideA << " StrideB = " << StrideB << " StrideC = " << StrideC
|
| 285 |
+
<< " KBatch = " << best_kbatch << ": " << best_ave_time << " ms, " << best_tflops
|
| 286 |
+
<< " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
|
| 287 |
+
|
| 288 |
+
return pass;
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
} // namespace profiler
|
| 292 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_universal_impl.hpp
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2023-2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/device_gemm_v2.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_universal.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename ADataType,
|
| 28 |
+
typename BDataType,
|
| 29 |
+
typename ComputeDataType,
|
| 30 |
+
typename AccDataType,
|
| 31 |
+
typename CDataType,
|
| 32 |
+
typename ALayout,
|
| 33 |
+
typename BLayout,
|
| 34 |
+
typename CLayout>
|
| 35 |
+
bool profile_gemm_universal_impl(int do_verification,
|
| 36 |
+
int init_method,
|
| 37 |
+
bool do_log,
|
| 38 |
+
bool time_kernel,
|
| 39 |
+
int M,
|
| 40 |
+
int N,
|
| 41 |
+
int K,
|
| 42 |
+
int StrideA,
|
| 43 |
+
int StrideB,
|
| 44 |
+
int StrideC,
|
| 45 |
+
int KBatch,
|
| 46 |
+
int n_warmup,
|
| 47 |
+
int n_iter,
|
| 48 |
+
uint64_t rotating = 0)
|
| 49 |
+
{
|
| 50 |
+
bool pass = true;
|
| 51 |
+
|
| 52 |
+
auto f_host_tensor_descriptor =
|
| 53 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 54 |
+
using namespace ck::literals;
|
| 55 |
+
|
| 56 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 57 |
+
{
|
| 58 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 59 |
+
}
|
| 60 |
+
else
|
| 61 |
+
{
|
| 62 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 63 |
+
}
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 67 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 68 |
+
Tensor<BDataType> b_k_n_permute(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 69 |
+
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 70 |
+
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 71 |
+
|
| 72 |
+
std::size_t total_gemm_needed =
|
| 73 |
+
a_m_k.GetElementSpaceSizeInBytes() + b_k_n.GetElementSpaceSizeInBytes();
|
| 74 |
+
int rotating_count = std::max(
|
| 75 |
+
1,
|
| 76 |
+
std::min(n_iter,
|
| 77 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 78 |
+
|
| 79 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 80 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 81 |
+
std::cout << "c_m_n: " << c_m_n_device_result.mDesc << std::endl;
|
| 82 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 83 |
+
|
| 84 |
+
switch(init_method)
|
| 85 |
+
{
|
| 86 |
+
case 0: break;
|
| 87 |
+
case 1:
|
| 88 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-1, 2});
|
| 89 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-1, 2});
|
| 90 |
+
break;
|
| 91 |
+
case 2:
|
| 92 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 93 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 94 |
+
break;
|
| 95 |
+
default:
|
| 96 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 97 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-2, 2});
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 101 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 102 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 103 |
+
|
| 104 |
+
const auto a_element_op = AElementOp{};
|
| 105 |
+
const auto b_element_op = BElementOp{};
|
| 106 |
+
const auto c_element_op = CElementOp{};
|
| 107 |
+
|
| 108 |
+
DeviceMem a_device_buf(a_m_k.GetElementSpaceSizeInBytes());
|
| 109 |
+
DeviceMem b_device_buf(b_k_n_permute.GetElementSpaceSizeInBytes());
|
| 110 |
+
DeviceMem c_device_buf(c_m_n_device_result.GetElementSpaceSizeInBytes());
|
| 111 |
+
|
| 112 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 113 |
+
|
| 114 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmV2<ALayout,
|
| 115 |
+
BLayout,
|
| 116 |
+
CLayout,
|
| 117 |
+
ADataType,
|
| 118 |
+
BDataType,
|
| 119 |
+
CDataType,
|
| 120 |
+
AElementOp,
|
| 121 |
+
BElementOp,
|
| 122 |
+
CElementOp>;
|
| 123 |
+
|
| 124 |
+
// get device op instances
|
| 125 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 126 |
+
DeviceOp>::GetInstances();
|
| 127 |
+
|
| 128 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 129 |
+
|
| 130 |
+
// Run reference GEMM
|
| 131 |
+
if(do_verification)
|
| 132 |
+
{
|
| 133 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 134 |
+
BDataType,
|
| 135 |
+
CDataType,
|
| 136 |
+
AccDataType,
|
| 137 |
+
AElementOp,
|
| 138 |
+
BElementOp,
|
| 139 |
+
CElementOp,
|
| 140 |
+
ComputeDataType>;
|
| 141 |
+
|
| 142 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 143 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 144 |
+
|
| 145 |
+
auto ref_argument = ref_gemm.MakeArgument(
|
| 146 |
+
a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);
|
| 147 |
+
|
| 148 |
+
ref_invoker.Run(ref_argument);
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
std::string best_op_name;
|
| 152 |
+
std::optional<std::string> best_op_object_name;
|
| 153 |
+
float best_ave_time = 0;
|
| 154 |
+
float best_tflops = 0;
|
| 155 |
+
float best_gb_per_sec = 0;
|
| 156 |
+
float best_kbatch = 0;
|
| 157 |
+
|
| 158 |
+
// profile device GEMM instances
|
| 159 |
+
for(auto& op_ptr : op_ptrs)
|
| 160 |
+
{
|
| 161 |
+
const int KPerBlock = op_ptr->GetKPerBlock();
|
| 162 |
+
|
| 163 |
+
if(op_ptr->GetPermuteB())
|
| 164 |
+
{
|
| 165 |
+
int K1 = KPerBlock;
|
| 166 |
+
int K0 = K / KPerBlock;
|
| 167 |
+
|
| 168 |
+
// int K0, N, K1
|
| 169 |
+
for(int j = 0; j < K0; j++)
|
| 170 |
+
{
|
| 171 |
+
for(int i = 0; i < N; i++)
|
| 172 |
+
{
|
| 173 |
+
for(int jj = 0; jj < K1; jj++)
|
| 174 |
+
{
|
| 175 |
+
b_k_n_permute(j * N * K1 + i * K1 + jj) = b_k_n(i * K + (j * K1 + jj));
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
else
|
| 181 |
+
{
|
| 182 |
+
b_k_n_permute = b_k_n;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
#if CK_USE_PK4_LAYOUT_SHUFFLE
|
| 186 |
+
// Conversion from pk_i4_t to half_t expects a particular permutation
|
| 187 |
+
if constexpr(is_same_v<BDataType, pk_i4_t> && is_same_v<ComputeDataType, half_t>)
|
| 188 |
+
{
|
| 189 |
+
// vector pk_i4x4 permute
|
| 190 |
+
for(int i = 0; i < N; i++)
|
| 191 |
+
{
|
| 192 |
+
for(int j = 0; j < K; j += 8)
|
| 193 |
+
{
|
| 194 |
+
int input[8];
|
| 195 |
+
|
| 196 |
+
for(int k = 0; k < 4; k++)
|
| 197 |
+
{
|
| 198 |
+
int i4x2 = b_k_n_permute(j + k * 2, i).data;
|
| 199 |
+
input[k * 2 + 0] = (i4x2 >> 4) & 0xf;
|
| 200 |
+
input[k * 2 + 1] = (i4x2 >> 0) & 0xf;
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
// permute 01234567->20643175
|
| 204 |
+
{
|
| 205 |
+
int hi = input[2];
|
| 206 |
+
int lo = input[0];
|
| 207 |
+
int i4x2 = (hi << 4) | lo;
|
| 208 |
+
|
| 209 |
+
b_k_n_permute(j + 0, i) = i4x2;
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
{
|
| 213 |
+
int hi = input[6];
|
| 214 |
+
int lo = input[4];
|
| 215 |
+
int i4x2 = (hi << 4) | lo;
|
| 216 |
+
|
| 217 |
+
b_k_n_permute(j + 2, i) = i4x2;
|
| 218 |
+
}
|
| 219 |
+
|
| 220 |
+
{
|
| 221 |
+
int hi = input[3];
|
| 222 |
+
int lo = input[1];
|
| 223 |
+
int i4x2 = (hi << 4) | lo;
|
| 224 |
+
|
| 225 |
+
b_k_n_permute(j + 4, i) = i4x2;
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
{
|
| 229 |
+
int hi = input[7];
|
| 230 |
+
int lo = input[5];
|
| 231 |
+
int i4x2 = (hi << 4) | lo;
|
| 232 |
+
|
| 233 |
+
b_k_n_permute(j + 6, i) = i4x2;
|
| 234 |
+
}
|
| 235 |
+
}
|
| 236 |
+
}
|
| 237 |
+
}
|
| 238 |
+
#endif
|
| 239 |
+
|
| 240 |
+
b_device_buf.ToDevice(b_k_n_permute.mData.data());
|
| 241 |
+
|
| 242 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 16, 19, 32, 38};
|
| 243 |
+
|
| 244 |
+
if(KBatch > 0)
|
| 245 |
+
{
|
| 246 |
+
kbatch_list = {KBatch};
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 250 |
+
{
|
| 251 |
+
auto kbatch_curr = kbatch_list[i];
|
| 252 |
+
|
| 253 |
+
auto argument_ptr =
|
| 254 |
+
op_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 255 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 256 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 257 |
+
M,
|
| 258 |
+
N,
|
| 259 |
+
K,
|
| 260 |
+
StrideA,
|
| 261 |
+
StrideB,
|
| 262 |
+
StrideC,
|
| 263 |
+
kbatch_curr,
|
| 264 |
+
a_element_op,
|
| 265 |
+
b_element_op,
|
| 266 |
+
c_element_op);
|
| 267 |
+
|
| 268 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 269 |
+
|
| 270 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 271 |
+
{
|
| 272 |
+
|
| 273 |
+
// re-init C to zero before profiling next kernel
|
| 274 |
+
c_device_buf.SetZero();
|
| 275 |
+
|
| 276 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 277 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 278 |
+
|
| 279 |
+
if(do_verification)
|
| 280 |
+
{
|
| 281 |
+
c_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
| 282 |
+
|
| 283 |
+
#if defined CK_ENABLE_FP8
|
| 284 |
+
// set softer tolerances for fp8
|
| 285 |
+
if constexpr(is_same_v<ADataType, f8_t> || is_same_v<BDataType, f8_t> ||
|
| 286 |
+
is_same_v<CDataType, f8_t>)
|
| 287 |
+
{
|
| 288 |
+
std::string msg = "Error: Incorrect results!";
|
| 289 |
+
double rtol = 1e-1;
|
| 290 |
+
double atol = 1e-1;
|
| 291 |
+
pass = pass & ck::utils::check_err(
|
| 292 |
+
c_m_n_device_result, c_m_n_host_result, msg, rtol, atol);
|
| 293 |
+
}
|
| 294 |
+
else
|
| 295 |
+
{
|
| 296 |
+
#endif
|
| 297 |
+
pass = pass & ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 298 |
+
#if defined CK_ENABLE_FP8
|
| 299 |
+
}
|
| 300 |
+
#endif
|
| 301 |
+
|
| 302 |
+
if(do_log)
|
| 303 |
+
{
|
| 304 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",") << std::endl;
|
| 305 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
|
| 306 |
+
LogRangeAsType<float>(
|
| 307 |
+
std::cout << "c_host : ", c_m_n_host_result.mData, ",")
|
| 308 |
+
<< std::endl;
|
| 309 |
+
LogRangeAsType<float>(
|
| 310 |
+
std::cout << "c_device: ", c_m_n_device_result.mData, ",")
|
| 311 |
+
<< std::endl;
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 316 |
+
std::optional<std::string> op_obj_name = op_ptr->GetObjectName();
|
| 317 |
+
|
| 318 |
+
float ave_time = invoker_ptr->Run(argument_ptr.get(),
|
| 319 |
+
StreamConfig{nullptr,
|
| 320 |
+
time_kernel,
|
| 321 |
+
0,
|
| 322 |
+
n_warmup,
|
| 323 |
+
n_iter,
|
| 324 |
+
rotating_count > 1,
|
| 325 |
+
rotating_count});
|
| 326 |
+
|
| 327 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 328 |
+
|
| 329 |
+
static constexpr index_t BPackedSize = []() {
|
| 330 |
+
if constexpr(is_same_v<remove_cvref_t<BDataType>, pk_i4_t>)
|
| 331 |
+
return 2;
|
| 332 |
+
else
|
| 333 |
+
return 1;
|
| 334 |
+
}();
|
| 335 |
+
|
| 336 |
+
std::size_t num_btype = sizeof(ADataType) * M * K +
|
| 337 |
+
sizeof(BDataType) * K * N / BPackedSize +
|
| 338 |
+
sizeof(CDataType) * M * N;
|
| 339 |
+
|
| 340 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 341 |
+
|
| 342 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 343 |
+
|
| 344 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 345 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", KBatch "
|
| 346 |
+
<< kbatch_curr << std::endl;
|
| 347 |
+
|
| 348 |
+
if(tflops > best_tflops && ave_time > 1e-10)
|
| 349 |
+
{
|
| 350 |
+
best_op_name = op_name;
|
| 351 |
+
best_op_object_name = op_obj_name;
|
| 352 |
+
best_tflops = tflops;
|
| 353 |
+
best_ave_time = ave_time;
|
| 354 |
+
best_gb_per_sec = gb_per_sec;
|
| 355 |
+
best_kbatch = kbatch_curr;
|
| 356 |
+
}
|
| 357 |
+
}
|
| 358 |
+
else
|
| 359 |
+
{
|
| 360 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 361 |
+
<< std::endl;
|
| 362 |
+
}
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
if constexpr(is_same<CDataType, float>::value)
|
| 367 |
+
{
|
| 368 |
+
std::cout << "Best Perf for datatype = f32";
|
| 369 |
+
}
|
| 370 |
+
else if constexpr(is_same<CDataType, half_t>::value)
|
| 371 |
+
{
|
| 372 |
+
std::cout << "Best Perf for datatype = f16";
|
| 373 |
+
}
|
| 374 |
+
else if constexpr(is_same<CDataType, bhalf_t>::value)
|
| 375 |
+
{
|
| 376 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 377 |
+
}
|
| 378 |
+
else if constexpr(is_same<CDataType, int8_t>::value)
|
| 379 |
+
{
|
| 380 |
+
std::cout << "Best Perf for datatype = int8";
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 384 |
+
{
|
| 385 |
+
std::cout << " ALayout = RowMajor";
|
| 386 |
+
}
|
| 387 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 388 |
+
{
|
| 389 |
+
std::cout << " ALayout = ColumnMajor";
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 393 |
+
{
|
| 394 |
+
std::cout << " BLayout = RowMajor";
|
| 395 |
+
}
|
| 396 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 397 |
+
{
|
| 398 |
+
std::cout << " BLayout = ColumnMajor";
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 402 |
+
<< " StrideB = " << StrideB << " StrideC = " << StrideC << " KBatch = " << best_kbatch
|
| 403 |
+
<< " : " << best_ave_time << " ms, " << best_tflops << " TFlops, " << best_gb_per_sec
|
| 404 |
+
<< " GB/s, " << best_op_name << std::endl;
|
| 405 |
+
|
| 406 |
+
if(best_op_object_name)
|
| 407 |
+
std::cout << best_op_object_name.value() << std::endl;
|
| 408 |
+
|
| 409 |
+
return pass;
|
| 410 |
+
}
|
| 411 |
+
|
| 412 |
+
} // namespace profiler
|
| 413 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_gemm_universal_reduce_impl.hpp
ADDED
|
@@ -0,0 +1,323 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/impl/device_gemm_xdl_cshuffle_v3r1.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/gemm_universal_reduce.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 23 |
+
|
| 24 |
+
namespace ck {
|
| 25 |
+
namespace profiler {
|
| 26 |
+
|
| 27 |
+
template <typename ADataType,
|
| 28 |
+
typename BDataType,
|
| 29 |
+
typename DsDataType,
|
| 30 |
+
typename AccDataType,
|
| 31 |
+
typename CDataType,
|
| 32 |
+
typename ALayout,
|
| 33 |
+
typename BLayout,
|
| 34 |
+
typename DsLayout,
|
| 35 |
+
typename CLayout>
|
| 36 |
+
bool profile_gemm_universal_reduce_impl(int do_verification,
|
| 37 |
+
int init_method,
|
| 38 |
+
bool do_log,
|
| 39 |
+
bool time_kernel,
|
| 40 |
+
int M,
|
| 41 |
+
int N,
|
| 42 |
+
int K,
|
| 43 |
+
int StrideA,
|
| 44 |
+
int StrideB,
|
| 45 |
+
int StrideC,
|
| 46 |
+
int KBatch,
|
| 47 |
+
int n_warmup,
|
| 48 |
+
int n_iter,
|
| 49 |
+
uint64_t rotating = 0)
|
| 50 |
+
{
|
| 51 |
+
bool pass = true;
|
| 52 |
+
|
| 53 |
+
auto f_host_tensor_descriptor =
|
| 54 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 55 |
+
using namespace ck::literals;
|
| 56 |
+
|
| 57 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 58 |
+
{
|
| 59 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 60 |
+
}
|
| 61 |
+
else
|
| 62 |
+
{
|
| 63 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 64 |
+
}
|
| 65 |
+
};
|
| 66 |
+
|
| 67 |
+
Tensor<ADataType> a_m_k(f_host_tensor_descriptor(M, K, StrideA, ALayout{}));
|
| 68 |
+
Tensor<BDataType> b_k_n(f_host_tensor_descriptor(K, N, StrideB, BLayout{}));
|
| 69 |
+
Tensor<CDataType> c_m_n_host_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 70 |
+
Tensor<CDataType> c_m_n_device_result(f_host_tensor_descriptor(M, N, StrideC, CLayout{}));
|
| 71 |
+
|
| 72 |
+
int total_gemm_needed = a_m_k.GetElementSpaceSizeInBytes() + b_k_n.GetElementSpaceSizeInBytes();
|
| 73 |
+
int rotating_count = std::max(
|
| 74 |
+
1,
|
| 75 |
+
std::min(n_iter,
|
| 76 |
+
static_cast<int>(std::ceil(static_cast<double>(rotating) / total_gemm_needed))));
|
| 77 |
+
|
| 78 |
+
std::cout << "a_m_k: " << a_m_k.mDesc << std::endl;
|
| 79 |
+
std::cout << "b_k_n: " << b_k_n.mDesc << std::endl;
|
| 80 |
+
std::cout << "c_m_n: " << c_m_n_device_result.mDesc << std::endl;
|
| 81 |
+
std::cout << "rotating count: " << rotating_count << std::endl;
|
| 82 |
+
|
| 83 |
+
switch(init_method)
|
| 84 |
+
{
|
| 85 |
+
case 0: break;
|
| 86 |
+
case 1:
|
| 87 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_2<ADataType>{-1, 2});
|
| 88 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_2<BDataType>{-1, 2});
|
| 89 |
+
break;
|
| 90 |
+
default:
|
| 91 |
+
a_m_k.GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0});
|
| 92 |
+
b_k_n.GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5});
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 96 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 97 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 98 |
+
|
| 99 |
+
const auto a_element_op = AElementOp{};
|
| 100 |
+
const auto b_element_op = BElementOp{};
|
| 101 |
+
const auto c_element_op = CElementOp{};
|
| 102 |
+
|
| 103 |
+
DeviceMem a_device_buf(sizeof(ADataType) * a_m_k.mDesc.GetElementSpaceSize());
|
| 104 |
+
DeviceMem b_device_buf(sizeof(BDataType) * b_k_n.mDesc.GetElementSpaceSize());
|
| 105 |
+
DeviceMem c_device_buf(sizeof(CDataType) * c_m_n_device_result.mDesc.GetElementSpaceSize());
|
| 106 |
+
|
| 107 |
+
a_device_buf.ToDevice(a_m_k.mData.data());
|
| 108 |
+
b_device_buf.ToDevice(b_k_n.mData.data());
|
| 109 |
+
|
| 110 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGemmV2R1<ALayout,
|
| 111 |
+
BLayout,
|
| 112 |
+
DsLayout,
|
| 113 |
+
CLayout,
|
| 114 |
+
ADataType,
|
| 115 |
+
BDataType,
|
| 116 |
+
DsDataType,
|
| 117 |
+
CDataType,
|
| 118 |
+
AElementOp,
|
| 119 |
+
BElementOp,
|
| 120 |
+
CElementOp>;
|
| 121 |
+
|
| 122 |
+
// get device op instances
|
| 123 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 124 |
+
DeviceOp>::GetInstances();
|
| 125 |
+
|
| 126 |
+
std::cout << "found " << op_ptrs.size() << " instances" << std::endl;
|
| 127 |
+
|
| 128 |
+
// Run reference GEMM
|
| 129 |
+
if(do_verification)
|
| 130 |
+
{
|
| 131 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 132 |
+
BDataType,
|
| 133 |
+
CDataType,
|
| 134 |
+
AccDataType,
|
| 135 |
+
AElementOp,
|
| 136 |
+
BElementOp,
|
| 137 |
+
CElementOp>;
|
| 138 |
+
|
| 139 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 140 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 141 |
+
|
| 142 |
+
auto ref_argument = ref_gemm.MakeArgument(
|
| 143 |
+
a_m_k, b_k_n, c_m_n_host_result, a_element_op, b_element_op, c_element_op);
|
| 144 |
+
|
| 145 |
+
ref_invoker.Run(ref_argument);
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
std::string best_op_name;
|
| 149 |
+
float best_ave_time = 0;
|
| 150 |
+
float best_tflops = 0;
|
| 151 |
+
float best_gb_per_sec = 0;
|
| 152 |
+
float best_kbatch = 0;
|
| 153 |
+
|
| 154 |
+
// profile device GEMM instances
|
| 155 |
+
for(auto& op_ptr : op_ptrs)
|
| 156 |
+
{
|
| 157 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 12, 16, 19, 20, 32, 38};
|
| 158 |
+
|
| 159 |
+
if(KBatch > 0)
|
| 160 |
+
{
|
| 161 |
+
kbatch_list = {KBatch};
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
for(std::size_t i = 0; i < kbatch_list.size(); i++)
|
| 165 |
+
{
|
| 166 |
+
auto kbatch_curr = kbatch_list[i];
|
| 167 |
+
|
| 168 |
+
auto argument_ptr =
|
| 169 |
+
op_ptr->MakeArgumentPointer(static_cast<ADataType*>(a_device_buf.GetDeviceBuffer()),
|
| 170 |
+
static_cast<BDataType*>(b_device_buf.GetDeviceBuffer()),
|
| 171 |
+
{},
|
| 172 |
+
static_cast<CDataType*>(c_device_buf.GetDeviceBuffer()),
|
| 173 |
+
M,
|
| 174 |
+
N,
|
| 175 |
+
K,
|
| 176 |
+
StrideA,
|
| 177 |
+
StrideB,
|
| 178 |
+
{},
|
| 179 |
+
StrideC,
|
| 180 |
+
kbatch_curr,
|
| 181 |
+
a_element_op,
|
| 182 |
+
b_element_op,
|
| 183 |
+
c_element_op);
|
| 184 |
+
|
| 185 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 186 |
+
|
| 187 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 188 |
+
{
|
| 189 |
+
|
| 190 |
+
DeviceMem gemm_workspace_dev(op_ptr->GetWorkSpaceSize(argument_ptr.get()));
|
| 191 |
+
op_ptr->SetWorkSpacePointer(
|
| 192 |
+
argument_ptr.get(), gemm_workspace_dev.GetDeviceBuffer(), StreamConfig{});
|
| 193 |
+
|
| 194 |
+
// re-init C to zero before profiling next kernel
|
| 195 |
+
c_device_buf.SetZero();
|
| 196 |
+
|
| 197 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 198 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 199 |
+
|
| 200 |
+
if(do_verification)
|
| 201 |
+
{
|
| 202 |
+
c_device_buf.FromDevice(c_m_n_device_result.mData.data());
|
| 203 |
+
|
| 204 |
+
pass = pass & ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 205 |
+
|
| 206 |
+
if(do_log)
|
| 207 |
+
{
|
| 208 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k.mData, ",") << std::endl;
|
| 209 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n.mData, ",") << std::endl;
|
| 210 |
+
LogRangeAsType<float>(
|
| 211 |
+
std::cout << "c_host : ", c_m_n_host_result.mData, ",")
|
| 212 |
+
<< std::endl;
|
| 213 |
+
LogRangeAsType<float>(
|
| 214 |
+
std::cout << "c_device: ", c_m_n_device_result.mData, ",")
|
| 215 |
+
<< std::endl;
|
| 216 |
+
}
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 220 |
+
|
| 221 |
+
float ave_time = invoker_ptr->Run(argument_ptr.get(),
|
| 222 |
+
StreamConfig{nullptr,
|
| 223 |
+
time_kernel,
|
| 224 |
+
0,
|
| 225 |
+
n_warmup,
|
| 226 |
+
n_iter,
|
| 227 |
+
rotating_count > 1,
|
| 228 |
+
rotating_count});
|
| 229 |
+
|
| 230 |
+
std::size_t flop = std::size_t(2) * M * N * K;
|
| 231 |
+
|
| 232 |
+
std::size_t num_btype = sizeof(ADataType) * M * K + sizeof(BDataType) * K * N +
|
| 233 |
+
sizeof(CDataType) * M * N;
|
| 234 |
+
|
| 235 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 236 |
+
|
| 237 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 238 |
+
|
| 239 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 240 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << op_name << ", KBatch "
|
| 241 |
+
<< kbatch_curr << std::endl;
|
| 242 |
+
|
| 243 |
+
#if defined CK_ENABLE_FP8
|
| 244 |
+
// set softer tolerances for fp8
|
| 245 |
+
if constexpr(is_same_v<ADataType, f8_t> || is_same_v<BDataType, f8_t> ||
|
| 246 |
+
is_same_v<CDataType, f8_t>)
|
| 247 |
+
{
|
| 248 |
+
std::string msg = "Error: Incorrect results!";
|
| 249 |
+
double rtol = 1e-1;
|
| 250 |
+
double atol = 1e-1;
|
| 251 |
+
pass = pass & ck::utils::check_err(
|
| 252 |
+
c_m_n_device_result, c_m_n_host_result, msg, rtol, atol);
|
| 253 |
+
}
|
| 254 |
+
else
|
| 255 |
+
{
|
| 256 |
+
#endif
|
| 257 |
+
pass = pass & ck::utils::check_err(c_m_n_device_result, c_m_n_host_result);
|
| 258 |
+
#if defined CK_ENABLE_FP8
|
| 259 |
+
}
|
| 260 |
+
#endif
|
| 261 |
+
|
| 262 |
+
if(tflops > best_tflops)
|
| 263 |
+
{
|
| 264 |
+
best_op_name = op_name;
|
| 265 |
+
best_tflops = tflops;
|
| 266 |
+
best_ave_time = ave_time;
|
| 267 |
+
best_gb_per_sec = gb_per_sec;
|
| 268 |
+
best_kbatch = kbatch_curr;
|
| 269 |
+
}
|
| 270 |
+
}
|
| 271 |
+
else
|
| 272 |
+
{
|
| 273 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem"
|
| 274 |
+
<< std::endl;
|
| 275 |
+
}
|
| 276 |
+
}
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
if constexpr(is_same<CDataType, float>::value)
|
| 280 |
+
{
|
| 281 |
+
std::cout << "Best Perf for datatype = f32";
|
| 282 |
+
}
|
| 283 |
+
else if constexpr(is_same<CDataType, half_t>::value)
|
| 284 |
+
{
|
| 285 |
+
std::cout << "Best Perf for datatype = f16";
|
| 286 |
+
}
|
| 287 |
+
else if constexpr(is_same<CDataType, bhalf_t>::value)
|
| 288 |
+
{
|
| 289 |
+
std::cout << "Best Perf for datatype = bf16";
|
| 290 |
+
}
|
| 291 |
+
else if constexpr(is_same<CDataType, int8_t>::value)
|
| 292 |
+
{
|
| 293 |
+
std::cout << "Best Perf for datatype = int8";
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
if constexpr(is_same<ALayout, tensor_layout::gemm::RowMajor>::value)
|
| 297 |
+
{
|
| 298 |
+
std::cout << " ALayout = RowMajor";
|
| 299 |
+
}
|
| 300 |
+
else if constexpr(is_same<ALayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 301 |
+
{
|
| 302 |
+
std::cout << " ALayout = ColumnMajor";
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
if constexpr(is_same<BLayout, tensor_layout::gemm::RowMajor>::value)
|
| 306 |
+
{
|
| 307 |
+
std::cout << " BLayout = RowMajor";
|
| 308 |
+
}
|
| 309 |
+
else if constexpr(is_same<BLayout, tensor_layout::gemm::ColumnMajor>::value)
|
| 310 |
+
{
|
| 311 |
+
std::cout << " BLayout = ColumnMajor";
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
std::cout << " M = " << M << " N = " << N << " K = " << K << " StrideA = " << StrideA
|
| 315 |
+
<< " StrideB = " << StrideB << " StrideC = " << StrideC << " KBatch = " << best_kbatch
|
| 316 |
+
<< " : " << best_ave_time << " ms, " << best_tflops << " TFlops, " << best_gb_per_sec
|
| 317 |
+
<< " GB/s, " << best_op_name << std::endl;
|
| 318 |
+
|
| 319 |
+
return pass;
|
| 320 |
+
}
|
| 321 |
+
|
| 322 |
+
} // namespace profiler
|
| 323 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_conv_fwd_bias_clamp_impl.hpp
ADDED
|
@@ -0,0 +1,323 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 13 |
+
|
| 14 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward_bias_clamp.hpp"
|
| 15 |
+
|
| 16 |
+
#include "ck/library/utility/algorithm.hpp"
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 22 |
+
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
| 23 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
| 24 |
+
|
| 25 |
+
namespace ck {
|
| 26 |
+
namespace profiler {
|
| 27 |
+
|
| 28 |
+
// NOTE: Usage of NHWGK layout for GK bias is a workaround. This test is to
|
| 29 |
+
// just keep such implementation valid.
|
| 30 |
+
// TODO: Add possiblity to pass GK layout and GK lengths for bias and reuse
|
| 31 |
+
// the same instances.
|
| 32 |
+
|
| 33 |
+
template <ck::index_t NDimSpatial>
|
| 34 |
+
auto get_bias_desc(ck::index_t G, ck::index_t K)
|
| 35 |
+
{
|
| 36 |
+
if constexpr(NDimSpatial == 1)
|
| 37 |
+
{
|
| 38 |
+
return HostTensorDescriptor({G, 1, K, 1}, {K, 0, 1, 0});
|
| 39 |
+
}
|
| 40 |
+
else if constexpr(NDimSpatial == 2)
|
| 41 |
+
{
|
| 42 |
+
return HostTensorDescriptor({G, 1, K, 1, 1}, {K, 0, 1, 0, 0});
|
| 43 |
+
}
|
| 44 |
+
else
|
| 45 |
+
{
|
| 46 |
+
return HostTensorDescriptor({G, 1, K, 1, 1, 1}, {K, 0, 1, 0, 0, 0});
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
/// @brief Profile every available device instance of grouped convolution
///        forward fused with bias-add + clamp, and (optionally) verify each
///        against a CPU reference convolution.
///
/// @tparam NDimSpatial  number of spatial dimensions (1, 2 or 3)
/// @tparam BiasGK       when true, the bias is a broadcast GK tensor (zero
///                      strides on N/spatial dims) instead of a full
///                      output-shaped tensor — see NOTE above
///
/// @param do_verification  non-zero to compare device output against the host reference
/// @param init_method      0: leave tensors uninitialized, 1: small integers, else: floats
/// @param do_log           dump full tensor contents when verifying
/// @param time_kernel      time kernels instead of a single functional run
/// @param conv_param       convolution problem description (lengths, strides, pads, ...)
/// @return true if all supported instances passed verification (or verification was off)
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType,
          typename AComputeType = InDataType,
          typename BComputeType = AComputeType,
          typename IndexType    = ck::index_t,
          bool BiasGK           = false>
bool profile_grouped_conv_fwd_bias_clamp_impl(int do_verification,
                                              int init_method,
                                              bool do_log,
                                              bool time_kernel,
                                              const ck::utils::conv::ConvParam& conv_param)
{
    using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
    using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
    using OutElementOp = ck::tensor_operation::element_wise::AddClamp;

    // Clamp bounds applied after the bias add.
    // NOTE(review): names shadow std::floor/std::ceil — harmless here, but
    // consider renaming to clamp_min/clamp_max.
    const float floor = 0.f;
    const float ceil  = 256.f;

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};
    const auto out_element_op = OutElementOp{floor, ceil};

    // Packed host descriptors in canonical G,N,C,spatial / G,K,C,spatial /
    // G,N,K,spatial order for the requested memory layouts.
    const auto in_g_n_c_wis_desc =
        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_param);

    const auto wei_g_k_c_xs_desc =
        ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(conv_param);

    const auto out_g_n_k_wos_desc =
        ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(conv_param);

    const index_t G = conv_param.G_;
    const index_t K = conv_param.K_;

    // Plain arrays of lengths/strides in the integer type the device op expects.
    std::array<IndexType, NDimSpatial + 3> a_g_n_c_wis_lengths{};
    std::array<IndexType, NDimSpatial + 3> a_g_n_c_wis_strides{};
    std::array<IndexType, NDimSpatial + 3> b_g_k_c_xs_lengths{};
    std::array<IndexType, NDimSpatial + 3> b_g_k_c_xs_strides{};
    std::array<IndexType, NDimSpatial + 3> e_g_n_k_wos_lengths{};
    std::array<IndexType, NDimSpatial + 3> e_g_n_k_wos_strides{};
    std::array<IndexType, NDimSpatial + 3> d_g_n_k_wos_strides{};
    std::array<IndexType, NDimSpatial> conv_filter_strides{};
    std::array<IndexType, NDimSpatial> conv_filter_dilations{};
    std::array<IndexType, NDimSpatial> input_left_pads{};
    std::array<IndexType, NDimSpatial> input_right_pads{};

    auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

    copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
    copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
    copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
    copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
    copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
    // Bias (D tensor) initially uses output strides; zeroed below when BiasGK.
    copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
    copy(out_g_n_k_wos_desc.GetStrides(), d_g_n_k_wos_strides);
    copy(conv_param.conv_filter_strides_, conv_filter_strides);
    copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
    copy(conv_param.input_left_pads_, input_left_pads);
    copy(conv_param.input_right_pads_, input_right_pads);

    Tensor<InDataType> input(in_g_n_c_wis_desc);
    Tensor<WeiDataType> weight(wei_g_k_c_xs_desc);
    Tensor<OutDataType> host_output(out_g_n_k_wos_desc);
    Tensor<OutDataType> device_output(out_g_n_k_wos_desc);
    // Broadcast GK bias or full output-shaped bias depending on BiasGK.
    const auto bias_desc = BiasGK ? get_bias_desc<NDimSpatial>(G, K) : out_g_n_k_wos_desc;
    Tensor<OutDataType> bias(bias_desc);

    std::cout << "input: " << input.mDesc << std::endl;
    std::cout << "weight: " << weight.mDesc << std::endl;
    std::cout << "output: " << host_output.mDesc << std::endl;
    std::cout << "bias: " << bias.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break; // leave tensors uninitialized (fastest, no verification value)
    case 1:
        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        weight.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        bias.GenerateTensorValue(GeneratorTensor_2<OutDataType>{-5, 5});
        break;
    default:
        input.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        weight.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
        bias.GenerateTensorValue(GeneratorTensor_3<OutDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weight.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * device_output.mDesc.GetElementSpaceSize());

    // GK bias only needs G*K elements on device; otherwise a full output buffer.
    const std::size_t bias_dev_buf_size =
        BiasGK ? sizeof(OutDataType) * G * K
               : sizeof(OutDataType) * device_output.mDesc.GetElementSpaceSize();
    DeviceMem bias_device_buf(bias_dev_buf_size);

    in_device_buf.ToDevice(input.mData.data());
    wei_device_buf.ToDevice(weight.mData.data());
    bias_device_buf.ToDevice(bias.mData.data());

    // run reference op
    if(do_verification)
    {
        // Trailing 0, 0, 1 — presumably numbers of extra A/B tensors and of
        // D tensors (the bias) — TODO confirm against ReferenceConvFwd.
        auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
                                                                     InDataType,
                                                                     WeiDataType,
                                                                     OutDataType,
                                                                     InElementOp,
                                                                     WeiElementOp,
                                                                     OutElementOp,
                                                                     0,
                                                                     0,
                                                                     1>{};

        std::array<Tensor<OutDataType>, 1> d_tensors = {bias};
        auto ref_invoker  = ref_conv.MakeInvoker();
        auto ref_argument = ref_conv.MakeArgument(input,
                                                  weight,
                                                  host_output,
                                                  conv_param.conv_filter_strides_,
                                                  conv_param.conv_filter_dilations_,
                                                  conv_param.input_left_pads_,
                                                  conv_param.input_right_pads_,
                                                  in_element_op,
                                                  wei_element_op,
                                                  out_element_op,
                                                  {},
                                                  {},
                                                  d_tensors);

        // init host output to zero
        host_output.SetZero();

        ref_invoker.Run(ref_argument);
    }

    // Best-instance statistics accumulated over the profiling loop.
    std::string best_op_name;
    float best_avg_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device op instances
    bool pass = true;

    // Run one instance: allocate workspace, check support, time, verify.
    auto run_impl = [&](auto& op_ptr, auto& argument_ptr) {
        // workspace_sz will be equal to 0 for other layout than NGCHW
        const std::size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
        DeviceMem workspace_dev(workspace_sz);
        op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            // re-init output to zero before profiling next kernel
            out_device_buf.SetZero();

            std::string op_name = op_ptr->GetTypeString();

            auto invoker_ptr = op_ptr->MakeInvokerPointer();

            float avg_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop      = conv_param.GetFlops();
            std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

            // flop / 1e9 with time in ms yields TFLOPS; bytes / 1e6 yields GB/s.
            float tflops = static_cast<float>(flop) / 1.E9 / avg_time;

            float gb_per_sec = num_btype / 1.E6 / avg_time;

            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
                      << gb_per_sec << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                out_device_buf.FromDevice(device_output.mData.data());

                pass = pass & ck::utils::check_err(device_output, host_output);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "input : ", input.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "weight: ", weight.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "host_output : ", host_output.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "device_output: ", device_output.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    };

    // Device op with one extra D tensor (the bias) fused into the epilogue.
    using DeviceOp =
        ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<NDimSpatial,
                                                                      InLayout,
                                                                      WeiLayout,
                                                                      ck::Tuple<OutLayout>,
                                                                      OutLayout,
                                                                      InDataType,
                                                                      WeiDataType,
                                                                      ck::Tuple<OutDataType>,
                                                                      OutDataType,
                                                                      InElementOp,
                                                                      WeiElementOp,
                                                                      OutElementOp,
                                                                      AComputeType,
                                                                      BComputeType>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "ckProfiler found " << op_ptrs.size() << " instances" << std::endl;

    if constexpr(BiasGK)
    {
        // Broadcast GK bias: zero out the N stride (index 1) and all spatial
        // strides so the device op reuses the same GK slice everywhere.
        constexpr ck::index_t spatial_offset = 3;
        d_g_n_k_wos_strides[1]               = 0;
        for(int i = 0; i < NDimSpatial; i++)
        {
            d_g_n_k_wos_strides[i + spatial_offset] = 0;
        }
    }

    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr = op_ptr->MakeArgumentPointer(in_device_buf.GetDeviceBuffer(),
                                                        wei_device_buf.GetDeviceBuffer(),
                                                        {bias_device_buf.GetDeviceBuffer()},
                                                        out_device_buf.GetDeviceBuffer(),
                                                        a_g_n_c_wis_lengths,
                                                        a_g_n_c_wis_strides,
                                                        b_g_k_c_xs_lengths,
                                                        b_g_k_c_xs_strides,
                                                        {e_g_n_k_wos_lengths},
                                                        {d_g_n_k_wos_strides},
                                                        e_g_n_k_wos_lengths,
                                                        e_g_n_k_wos_strides,
                                                        conv_filter_strides,
                                                        conv_filter_dilations,
                                                        input_left_pads,
                                                        input_right_pads,
                                                        in_element_op,
                                                        wei_element_op,
                                                        out_element_op);

        run_impl(op_ptr, argument_ptr);
    }

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_op_name << "\navg_time: " << best_avg_time
              << "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;

    return pass;
}
|
| 321 |
+
|
| 322 |
+
} // namespace profiler
|
| 323 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_conv_fwd_impl.hpp
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
#include <iostream>
|
| 8 |
+
#include <typeinfo>
|
| 9 |
+
|
| 10 |
+
#include "ck/ck.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 13 |
+
|
| 14 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward.hpp"
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward_clamp.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/algorithm.hpp"
|
| 18 |
+
#include "ck/library/utility/check_err.hpp"
|
| 19 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 21 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 22 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 23 |
+
#include "ck/library/utility/convolution_host_tensor_descriptor_helper.hpp"
|
| 24 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
| 25 |
+
|
| 26 |
+
namespace ck {
|
| 27 |
+
namespace profiler {
|
| 28 |
+
|
| 29 |
+
/// @brief Profile every available device instance of plain grouped
///        convolution forward (no extra D tensors), optionally verifying
///        each against a CPU reference convolution.
///
/// @tparam NDimSpatial   number of spatial dimensions (1, 2 or 3)
/// @tparam OutElementOp  output element-wise op (defaults to PassThrough;
///                       e.g. Clamp for fused activation variants)
///
/// @param do_verification  non-zero to compare device output against the host reference
/// @param init_method      0: leave tensors uninitialized, 1: small integers, else: floats
/// @param do_log           dump full tensor contents when verifying
/// @param time_kernel      time kernels instead of a single functional run
/// @param conv_param       convolution problem description (lengths, strides, pads, ...)
/// @param out_element_op   instance of the output element-wise op
/// @return true if all supported instances passed verification (or verification was off)
template <ck::index_t NDimSpatial,
          typename InLayout,
          typename WeiLayout,
          typename OutLayout,
          typename InDataType,
          typename WeiDataType,
          typename OutDataType,
          typename AComputeType = InDataType,
          typename BComputeType = AComputeType,
          typename IndexType    = ck::index_t,
          typename OutElementOp = ck::tensor_operation::element_wise::PassThrough>
bool profile_grouped_conv_fwd_impl(int do_verification,
                                   int init_method,
                                   bool do_log,
                                   bool time_kernel,
                                   const ck::utils::conv::ConvParam& conv_param,
                                   const OutElementOp out_element_op = OutElementOp{})
{
    using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
    using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;

    const auto in_element_op  = InElementOp{};
    const auto wei_element_op = WeiElementOp{};

    // Packed host descriptors in canonical G,N,C,spatial / G,K,C,spatial /
    // G,N,K,spatial order for the requested memory layouts.
    const auto in_g_n_c_wis_desc =
        ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_param);

    const auto wei_g_k_c_xs_desc =
        ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(conv_param);

    const auto out_g_n_k_wos_desc =
        ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(conv_param);

    // Plain arrays of lengths/strides in the integer type the device op expects.
    std::array<IndexType, NDimSpatial + 3> a_g_n_c_wis_lengths{};
    std::array<IndexType, NDimSpatial + 3> a_g_n_c_wis_strides{};
    std::array<IndexType, NDimSpatial + 3> b_g_k_c_xs_lengths{};
    std::array<IndexType, NDimSpatial + 3> b_g_k_c_xs_strides{};
    std::array<IndexType, NDimSpatial + 3> e_g_n_k_wos_lengths{};
    std::array<IndexType, NDimSpatial + 3> e_g_n_k_wos_strides{};
    std::array<IndexType, NDimSpatial> conv_filter_strides{};
    std::array<IndexType, NDimSpatial> conv_filter_dilations{};
    std::array<IndexType, NDimSpatial> input_left_pads{};
    std::array<IndexType, NDimSpatial> input_right_pads{};

    auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };

    copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
    copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
    copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
    copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
    copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
    copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
    copy(conv_param.conv_filter_strides_, conv_filter_strides);
    copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
    copy(conv_param.input_left_pads_, input_left_pads);
    copy(conv_param.input_right_pads_, input_right_pads);

    Tensor<InDataType> input(in_g_n_c_wis_desc);
    Tensor<WeiDataType> weight(wei_g_k_c_xs_desc);
    Tensor<OutDataType> host_output(out_g_n_k_wos_desc);
    Tensor<OutDataType> device_output(out_g_n_k_wos_desc);

    std::cout << "input: " << input.mDesc << std::endl;
    std::cout << "weight: " << weight.mDesc << std::endl;
    std::cout << "output: " << host_output.mDesc << std::endl;

    switch(init_method)
    {
    case 0: break; // leave tensors uninitialized (fastest, no verification value)
    case 1:
        input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
        weight.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-5, 5});
        break;
    default:
        input.GenerateTensorValue(GeneratorTensor_3<InDataType>{0.0, 1.0});
        weight.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-0.5, 0.5});
    }

    DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpaceSize());
    DeviceMem wei_device_buf(sizeof(WeiDataType) * weight.mDesc.GetElementSpaceSize());
    DeviceMem out_device_buf(sizeof(OutDataType) * device_output.mDesc.GetElementSpaceSize());

    in_device_buf.ToDevice(input.mData.data());
    wei_device_buf.ToDevice(weight.mData.data());

    // run reference op
    if(do_verification)
    {
        auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
                                                                     InDataType,
                                                                     WeiDataType,
                                                                     OutDataType,
                                                                     InElementOp,
                                                                     WeiElementOp,
                                                                     OutElementOp>{};

        auto ref_invoker  = ref_conv.MakeInvoker();
        auto ref_argument = ref_conv.MakeArgument(input,
                                                  weight,
                                                  host_output,
                                                  conv_param.conv_filter_strides_,
                                                  conv_param.conv_filter_dilations_,
                                                  conv_param.input_left_pads_,
                                                  conv_param.input_right_pads_,
                                                  in_element_op,
                                                  wei_element_op,
                                                  out_element_op);

        // init host output to zero
        host_output.SetZero();

        ref_invoker.Run(ref_argument);
    }

    // Best-instance statistics accumulated over the profiling loop.
    std::string best_op_name;
    float best_avg_time   = 0;
    float best_tflops     = 0;
    float best_gb_per_sec = 0;

    // profile device op instances
    bool pass = true;

    // Run one instance: allocate workspace, check support, time, verify.
    auto run_impl = [&](auto& op_ptr, auto& argument_ptr) {
        // workspace_sz will be equal to 0 for other layout than NGCHW
        const std::size_t workspace_sz = op_ptr->GetWorkSpaceSize(argument_ptr.get());
        DeviceMem workspace_dev(workspace_sz);
        op_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());

        if(op_ptr->IsSupportedArgument(argument_ptr.get()))
        {
            std::string op_name = op_ptr->GetTypeString();

            auto invoker_ptr = op_ptr->MakeInvokerPointer();

            float avg_time =
                invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});

            std::size_t flop      = conv_param.GetFlops();
            std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();

            // flop / 1e9 with time in ms yields TFLOPS; bytes / 1e6 yields GB/s.
            float tflops = static_cast<float>(flop) / 1.E9 / avg_time;

            float gb_per_sec = num_btype / 1.E6 / avg_time;

            std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
                      << gb_per_sec << " GB/s, " << op_name << std::endl;

            if(tflops > best_tflops)
            {
                best_op_name    = op_name;
                best_tflops     = tflops;
                best_avg_time   = avg_time;
                best_gb_per_sec = gb_per_sec;
            }

            if(do_verification)
            {
                out_device_buf.FromDevice(device_output.mData.data());

                pass = pass & ck::utils::check_err(device_output, host_output);

                if(do_log)
                {
                    LogRangeAsType<float>(std::cout << "input : ", input.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "weight: ", weight.mData, ",") << std::endl;
                    LogRangeAsType<float>(std::cout << "host_output : ", host_output.mData, ",")
                        << std::endl;
                    LogRangeAsType<float>(std::cout << "device_output: ", device_output.mData, ",")
                        << std::endl;
                }
            }
        }
        else
        {
            std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
        }
    };

    // Device op with empty A/B/D tuples: plain convolution, no fused tensors.
    using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<NDimSpatial,
                                                                                   InLayout,
                                                                                   WeiLayout,
                                                                                   ck::Tuple<>,
                                                                                   OutLayout,
                                                                                   InDataType,
                                                                                   WeiDataType,
                                                                                   ck::Tuple<>,
                                                                                   OutDataType,
                                                                                   InElementOp,
                                                                                   WeiElementOp,
                                                                                   OutElementOp,
                                                                                   AComputeType,
                                                                                   BComputeType>;

    // get device op instances
    const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
        DeviceOp>::GetInstances();

    std::cout << "ckProfiler found " << op_ptrs.size() << " instances" << std::endl;

    for(auto& op_ptr : op_ptrs)
    {
        auto argument_ptr = op_ptr->MakeArgumentPointer(in_device_buf.GetDeviceBuffer(),
                                                        wei_device_buf.GetDeviceBuffer(),
                                                        {},
                                                        out_device_buf.GetDeviceBuffer(),
                                                        a_g_n_c_wis_lengths,
                                                        a_g_n_c_wis_strides,
                                                        b_g_k_c_xs_lengths,
                                                        b_g_k_c_xs_strides,
                                                        {},
                                                        {},
                                                        e_g_n_k_wos_lengths,
                                                        e_g_n_k_wos_strides,
                                                        conv_filter_strides,
                                                        conv_filter_dilations,
                                                        input_left_pads,
                                                        input_right_pads,
                                                        in_element_op,
                                                        wei_element_op,
                                                        out_element_op);

        run_impl(op_ptr, argument_ptr);
    }

    std::cout << "Best configuration parameters:"
              << "\nname: " << best_op_name << "\navg_time: " << best_avg_time
              << "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;

    return pass;
}
|
| 259 |
+
|
| 260 |
+
} // namespace profiler
|
| 261 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_conv_fwd_outelementop_impl.hpp
ADDED
|
@@ -0,0 +1,352 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward_convscale.hpp"
|
| 4 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_convolution_forward_convinvscale.hpp"
|
| 5 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_conv_fwd.hpp"
|
| 6 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 7 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 8 |
+
|
| 9 |
+
namespace ck {
|
| 10 |
+
namespace profiler {
|
| 11 |
+
|
| 12 |
+
template <typename DataType>
|
| 13 |
+
inline constexpr double get_rtol()
|
| 14 |
+
{
|
| 15 |
+
if constexpr(std::is_same_v<DataType, float>)
|
| 16 |
+
{
|
| 17 |
+
return 1e-3;
|
| 18 |
+
}
|
| 19 |
+
else if constexpr(std::is_same_v<DataType, double>)
|
| 20 |
+
{
|
| 21 |
+
return 1e-6;
|
| 22 |
+
}
|
| 23 |
+
else if constexpr(std::is_same_v<DataType, ck::half_t>)
|
| 24 |
+
{
|
| 25 |
+
return 1e-3;
|
| 26 |
+
}
|
| 27 |
+
else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
|
| 28 |
+
{
|
| 29 |
+
return 5e-2;
|
| 30 |
+
}
|
| 31 |
+
else if constexpr(std::is_same_v<DataType, int32_t>)
|
| 32 |
+
{
|
| 33 |
+
return 1e-1;
|
| 34 |
+
}
|
| 35 |
+
else if constexpr(std::is_same_v<DataType, int8_t>)
|
| 36 |
+
{
|
| 37 |
+
return 1e-1;
|
| 38 |
+
}
|
| 39 |
+
else if constexpr(std::is_same_v<DataType, ck::f8_t>)
|
| 40 |
+
{
|
| 41 |
+
return 1e-1; // 240 and 224 are acceptable
|
| 42 |
+
}
|
| 43 |
+
else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
|
| 44 |
+
{
|
| 45 |
+
return 1.5e-1; // 57344 and 49152 are acceptable
|
| 46 |
+
}
|
| 47 |
+
else
|
| 48 |
+
{
|
| 49 |
+
return 1e-3;
|
| 50 |
+
}
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
template <typename DataType>
|
| 54 |
+
inline constexpr double get_atol()
|
| 55 |
+
{
|
| 56 |
+
if constexpr(std::is_same_v<DataType, float>)
|
| 57 |
+
{
|
| 58 |
+
return 1e-3;
|
| 59 |
+
}
|
| 60 |
+
else if constexpr(std::is_same_v<DataType, double>)
|
| 61 |
+
{
|
| 62 |
+
return 1e-6;
|
| 63 |
+
}
|
| 64 |
+
else if constexpr(std::is_same_v<DataType, ck::half_t>)
|
| 65 |
+
{
|
| 66 |
+
return 1e-3;
|
| 67 |
+
}
|
| 68 |
+
else if constexpr(std::is_same_v<DataType, ck::bhalf_t>)
|
| 69 |
+
{
|
| 70 |
+
return 5e-2;
|
| 71 |
+
}
|
| 72 |
+
else if constexpr(std::is_same_v<DataType, int32_t>)
|
| 73 |
+
{
|
| 74 |
+
return 1e-1;
|
| 75 |
+
}
|
| 76 |
+
else if constexpr(std::is_same_v<DataType, int8_t>)
|
| 77 |
+
{
|
| 78 |
+
return 1e-1;
|
| 79 |
+
}
|
| 80 |
+
else if constexpr(std::is_same_v<DataType, ck::f8_t>)
|
| 81 |
+
{
|
| 82 |
+
return 16.1; // 240 and 224 are acceptable
|
| 83 |
+
}
|
| 84 |
+
else if constexpr(std::is_same_v<DataType, ck::bf8_t>)
|
| 85 |
+
{
|
| 86 |
+
return 8192.1; // 57344 and 49152 are acceptable
|
| 87 |
+
}
|
| 88 |
+
else
|
| 89 |
+
{
|
| 90 |
+
return 1e-3;
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
template <ck::index_t NDimSpatial,
|
| 95 |
+
typename InLayout,
|
| 96 |
+
typename WeiLayout,
|
| 97 |
+
typename OutLayout,
|
| 98 |
+
typename InDataType,
|
| 99 |
+
typename WeiDataType,
|
| 100 |
+
typename OutDataType,
|
| 101 |
+
typename OutElementOp,
|
| 102 |
+
typename AComputeType = InDataType,
|
| 103 |
+
typename BComputeType = AComputeType>
|
| 104 |
+
bool profile_grouped_conv_fwd_outelementop_impl(int do_verification,
|
| 105 |
+
int init_method,
|
| 106 |
+
bool do_log,
|
| 107 |
+
bool time_kernel,
|
| 108 |
+
const ck::utils::conv::ConvParam& conv_param)
|
| 109 |
+
{
|
| 110 |
+
auto pass = true; // return status
|
| 111 |
+
|
| 112 |
+
using CShuffleDataType = float;
|
| 113 |
+
|
| 114 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 115 |
+
using InElementOp = PassThrough;
|
| 116 |
+
using WeiElementOp = PassThrough;
|
| 117 |
+
|
| 118 |
+
const auto in_element_op = InElementOp{};
|
| 119 |
+
const auto wei_element_op = WeiElementOp{};
|
| 120 |
+
|
| 121 |
+
const auto in_g_n_c_wis_desc =
|
| 122 |
+
ck::utils::conv::make_input_host_tensor_descriptor_g_n_c_wis_packed<InLayout>(conv_param);
|
| 123 |
+
|
| 124 |
+
const auto wei_g_k_c_xs_desc =
|
| 125 |
+
ck::utils::conv::make_weight_host_tensor_descriptor_g_k_c_xs_packed<WeiLayout>(conv_param);
|
| 126 |
+
|
| 127 |
+
const auto out_g_n_k_wos_desc =
|
| 128 |
+
ck::utils::conv::make_output_host_tensor_descriptor_g_n_k_wos_packed<OutLayout>(conv_param);
|
| 129 |
+
|
| 130 |
+
std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_lengths{};
|
| 131 |
+
std::array<ck::index_t, NDimSpatial + 3> a_g_n_c_wis_strides{};
|
| 132 |
+
std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_lengths{};
|
| 133 |
+
std::array<ck::index_t, NDimSpatial + 3> b_g_k_c_xs_strides{};
|
| 134 |
+
std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_lengths{};
|
| 135 |
+
std::array<ck::index_t, NDimSpatial + 3> e_g_n_k_wos_strides{};
|
| 136 |
+
std::array<ck::index_t, NDimSpatial> conv_filter_strides{};
|
| 137 |
+
std::array<ck::index_t, NDimSpatial> conv_filter_dilations{};
|
| 138 |
+
std::array<ck::index_t, NDimSpatial> input_left_pads{};
|
| 139 |
+
std::array<ck::index_t, NDimSpatial> input_right_pads{};
|
| 140 |
+
|
| 141 |
+
auto copy = [](const auto& x, auto& y) { ck::ranges::copy(x, y.begin()); };
|
| 142 |
+
|
| 143 |
+
copy(in_g_n_c_wis_desc.GetLengths(), a_g_n_c_wis_lengths);
|
| 144 |
+
copy(in_g_n_c_wis_desc.GetStrides(), a_g_n_c_wis_strides);
|
| 145 |
+
copy(wei_g_k_c_xs_desc.GetLengths(), b_g_k_c_xs_lengths);
|
| 146 |
+
copy(wei_g_k_c_xs_desc.GetStrides(), b_g_k_c_xs_strides);
|
| 147 |
+
copy(out_g_n_k_wos_desc.GetLengths(), e_g_n_k_wos_lengths);
|
| 148 |
+
copy(out_g_n_k_wos_desc.GetStrides(), e_g_n_k_wos_strides);
|
| 149 |
+
copy(conv_param.conv_filter_strides_, conv_filter_strides);
|
| 150 |
+
copy(conv_param.conv_filter_dilations_, conv_filter_dilations);
|
| 151 |
+
copy(conv_param.input_left_pads_, input_left_pads);
|
| 152 |
+
copy(conv_param.input_right_pads_, input_right_pads);
|
| 153 |
+
|
| 154 |
+
Tensor<InDataType> input(in_g_n_c_wis_desc);
|
| 155 |
+
Tensor<WeiDataType> weight(wei_g_k_c_xs_desc);
|
| 156 |
+
Tensor<CShuffleDataType> c(out_g_n_k_wos_desc);
|
| 157 |
+
Tensor<OutDataType> host_output(out_g_n_k_wos_desc);
|
| 158 |
+
Tensor<OutDataType> device_output(out_g_n_k_wos_desc);
|
| 159 |
+
|
| 160 |
+
std::cout << "input: " << input.mDesc << std::endl;
|
| 161 |
+
std::cout << "weight: " << weight.mDesc << std::endl;
|
| 162 |
+
std::cout << "output: " << host_output.mDesc << std::endl;
|
| 163 |
+
|
| 164 |
+
switch(init_method)
|
| 165 |
+
{
|
| 166 |
+
case 0: break;
|
| 167 |
+
case 1:
|
| 168 |
+
input.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5});
|
| 169 |
+
weight.GenerateTensorValue(GeneratorTensor_2<WeiDataType>{-1, 1});
|
| 170 |
+
break;
|
| 171 |
+
default:
|
| 172 |
+
input.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0});
|
| 173 |
+
weight.GenerateTensorValue(GeneratorTensor_3<WeiDataType>{-1.0, 1.0});
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
DeviceMem in_device_buf(sizeof(InDataType) * input.mDesc.GetElementSpaceSize());
|
| 177 |
+
DeviceMem wei_device_buf(sizeof(WeiDataType) * weight.mDesc.GetElementSpaceSize());
|
| 178 |
+
DeviceMem out_device_buf(sizeof(OutDataType) * device_output.mDesc.GetElementSpaceSize());
|
| 179 |
+
|
| 180 |
+
in_device_buf.ToDevice(input.mData.data());
|
| 181 |
+
wei_device_buf.ToDevice(weight.mData.data());
|
| 182 |
+
|
| 183 |
+
// random scale values
|
| 184 |
+
auto scale_in = type_convert<float>(
|
| 185 |
+
type_convert<f8_t>(2.0f * float(RAND_MAX / 2 - std::rand()) / float(RAND_MAX)));
|
| 186 |
+
auto scale_wei = type_convert<float>(
|
| 187 |
+
type_convert<f8_t>(2.0f * float(RAND_MAX / 2 - std::rand()) / float(RAND_MAX)));
|
| 188 |
+
auto scale_out = type_convert<float>(
|
| 189 |
+
type_convert<f8_t>(2.0f * float(RAND_MAX / 2 - std::rand()) / float(RAND_MAX)));
|
| 190 |
+
|
| 191 |
+
// initialize out_element_op for each iteration
|
| 192 |
+
const auto out_element_op = OutElementOp{scale_in, scale_wei, scale_out};
|
| 193 |
+
|
| 194 |
+
std::cout << "scale_in: " << scale_in << std::endl;
|
| 195 |
+
std::cout << "scale_wei: " << scale_wei << std::endl;
|
| 196 |
+
std::cout << "scale_out: " << scale_out << std::endl;
|
| 197 |
+
|
| 198 |
+
// run reference op
|
| 199 |
+
if(do_verification)
|
| 200 |
+
{
|
| 201 |
+
|
| 202 |
+
std::cout << "\nVerifying algorithm against reference convolution..." << std::endl;
|
| 203 |
+
std::cout << "\tUsing (rel_tol,abs_tol) = (" << std::setprecision(7)
|
| 204 |
+
<< get_rtol<OutDataType>() << ", " << get_atol<OutDataType>() << ")" << std::endl;
|
| 205 |
+
|
| 206 |
+
auto ref_conv = ck::tensor_operation::host::ReferenceConvFwd<NDimSpatial,
|
| 207 |
+
InDataType,
|
| 208 |
+
WeiDataType,
|
| 209 |
+
CShuffleDataType,
|
| 210 |
+
InElementOp,
|
| 211 |
+
WeiElementOp,
|
| 212 |
+
PassThrough>{};
|
| 213 |
+
|
| 214 |
+
auto ref_invoker = ref_conv.MakeInvoker();
|
| 215 |
+
auto ref_argument = ref_conv.MakeArgument(input,
|
| 216 |
+
weight,
|
| 217 |
+
c,
|
| 218 |
+
conv_param.conv_filter_strides_,
|
| 219 |
+
conv_param.conv_filter_dilations_,
|
| 220 |
+
conv_param.input_left_pads_,
|
| 221 |
+
conv_param.input_right_pads_,
|
| 222 |
+
in_element_op,
|
| 223 |
+
wei_element_op,
|
| 224 |
+
PassThrough{});
|
| 225 |
+
|
| 226 |
+
c.SetZero();
|
| 227 |
+
ref_invoker.Run(ref_argument);
|
| 228 |
+
|
| 229 |
+
host_output.ForEach([&](auto&, auto idx) { out_element_op(host_output(idx), c(idx)); });
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
std::string best_op_name;
|
| 233 |
+
float best_avg_time = 0;
|
| 234 |
+
float best_tflops = 0;
|
| 235 |
+
float best_gb_per_sec = 0;
|
| 236 |
+
|
| 237 |
+
auto run_impl = [&](auto& op_ptr, auto& argument_ptr) {
|
| 238 |
+
if(op_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 239 |
+
{
|
| 240 |
+
// re-init output to zero before profiling next kernel
|
| 241 |
+
out_device_buf.SetZero();
|
| 242 |
+
|
| 243 |
+
std::string op_name = op_ptr->GetTypeString();
|
| 244 |
+
|
| 245 |
+
auto invoker_ptr = op_ptr->MakeInvokerPointer();
|
| 246 |
+
|
| 247 |
+
float avg_time =
|
| 248 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 249 |
+
|
| 250 |
+
std::size_t flop = conv_param.GetFlops();
|
| 251 |
+
std::size_t num_btype = conv_param.GetByte<InDataType, WeiDataType, OutDataType>();
|
| 252 |
+
|
| 253 |
+
float tflops = static_cast<float>(flop) / 1.E9 / avg_time;
|
| 254 |
+
|
| 255 |
+
float gb_per_sec = num_btype / 1.E6 / avg_time;
|
| 256 |
+
|
| 257 |
+
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << tflops << " TFlops, "
|
| 258 |
+
<< gb_per_sec << " GB/s, " << op_name << std::endl;
|
| 259 |
+
|
| 260 |
+
if(tflops > best_tflops)
|
| 261 |
+
{
|
| 262 |
+
best_op_name = op_name;
|
| 263 |
+
best_tflops = tflops;
|
| 264 |
+
best_avg_time = avg_time;
|
| 265 |
+
best_gb_per_sec = gb_per_sec;
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
if(do_verification)
|
| 269 |
+
{
|
| 270 |
+
out_device_buf.FromDevice(device_output.mData.data());
|
| 271 |
+
|
| 272 |
+
pass = pass & ck::utils::check_err(device_output,
|
| 273 |
+
host_output,
|
| 274 |
+
"Error: Device and Host results do not match!",
|
| 275 |
+
get_rtol<OutDataType>(),
|
| 276 |
+
get_atol<OutDataType>());
|
| 277 |
+
|
| 278 |
+
if(do_log)
|
| 279 |
+
{
|
| 280 |
+
LogRangeAsType<InDataType>(std::cout << "input : ", input.mData, ",")
|
| 281 |
+
<< std::endl;
|
| 282 |
+
LogRangeAsType<WeiDataType>(std::cout << "weight: ", weight.mData, ",")
|
| 283 |
+
<< std::endl;
|
| 284 |
+
LogRangeAsType<OutDataType>(
|
| 285 |
+
std::cout << "host_output : ", host_output.mData, ",")
|
| 286 |
+
<< std::endl;
|
| 287 |
+
LogRangeAsType<OutDataType>(
|
| 288 |
+
std::cout << "device_output: ", device_output.mData, ",")
|
| 289 |
+
<< std::endl;
|
| 290 |
+
}
|
| 291 |
+
}
|
| 292 |
+
}
|
| 293 |
+
else
|
| 294 |
+
{
|
| 295 |
+
std::cout << op_ptr->GetTypeString() << " does not support this problem" << std::endl;
|
| 296 |
+
}
|
| 297 |
+
};
|
| 298 |
+
|
| 299 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleABD<NDimSpatial,
|
| 300 |
+
InLayout,
|
| 301 |
+
WeiLayout,
|
| 302 |
+
ck::Tuple<>,
|
| 303 |
+
OutLayout,
|
| 304 |
+
InDataType,
|
| 305 |
+
WeiDataType,
|
| 306 |
+
ck::Tuple<>,
|
| 307 |
+
OutDataType,
|
| 308 |
+
InElementOp,
|
| 309 |
+
WeiElementOp,
|
| 310 |
+
OutElementOp,
|
| 311 |
+
AComputeType,
|
| 312 |
+
BComputeType>;
|
| 313 |
+
|
| 314 |
+
// get device op instances
|
| 315 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 316 |
+
DeviceOp>::GetInstances();
|
| 317 |
+
|
| 318 |
+
std::cout << "ckProfiler found " << op_ptrs.size() << " instances" << std::endl;
|
| 319 |
+
|
| 320 |
+
for(auto& op_ptr : op_ptrs)
|
| 321 |
+
{
|
| 322 |
+
auto argument_ptr = op_ptr->MakeArgumentPointer(in_device_buf.GetDeviceBuffer(),
|
| 323 |
+
wei_device_buf.GetDeviceBuffer(),
|
| 324 |
+
{},
|
| 325 |
+
out_device_buf.GetDeviceBuffer(),
|
| 326 |
+
a_g_n_c_wis_lengths,
|
| 327 |
+
a_g_n_c_wis_strides,
|
| 328 |
+
b_g_k_c_xs_lengths,
|
| 329 |
+
b_g_k_c_xs_strides,
|
| 330 |
+
{},
|
| 331 |
+
{},
|
| 332 |
+
e_g_n_k_wos_lengths,
|
| 333 |
+
e_g_n_k_wos_strides,
|
| 334 |
+
conv_filter_strides,
|
| 335 |
+
conv_filter_dilations,
|
| 336 |
+
input_left_pads,
|
| 337 |
+
input_right_pads,
|
| 338 |
+
in_element_op,
|
| 339 |
+
wei_element_op,
|
| 340 |
+
out_element_op);
|
| 341 |
+
|
| 342 |
+
run_impl(op_ptr, argument_ptr);
|
| 343 |
+
}
|
| 344 |
+
|
| 345 |
+
std::cout << "Best configuration parameters:"
|
| 346 |
+
<< "\nname: " << best_op_name << "\navg_time: " << best_avg_time
|
| 347 |
+
<< "\ntflops: " << best_tflops << "\nGB/s: " << best_gb_per_sec << std::endl;
|
| 348 |
+
return pass;
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
} // namespace profiler
|
| 352 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_gemm_fastgelu_impl.hpp
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_gemm_fastgelu.hpp"
|
| 10 |
+
#include "ck/library/utility/check_err.hpp"
|
| 11 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 12 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 13 |
+
#include "ck/library/utility/fill.hpp"
|
| 14 |
+
#include "ck/library/utility/literals.hpp"
|
| 15 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 16 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 17 |
+
#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp"
|
| 18 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 19 |
+
|
| 20 |
+
namespace ck {
|
| 21 |
+
namespace profiler {
|
| 22 |
+
|
| 23 |
+
template <typename ADataType,
|
| 24 |
+
typename BDataType,
|
| 25 |
+
typename CDataType,
|
| 26 |
+
typename AccDataType,
|
| 27 |
+
typename ALayout,
|
| 28 |
+
typename BLayout,
|
| 29 |
+
typename CLayout>
|
| 30 |
+
bool profile_grouped_gemm_fastgelu_impl(int do_verification,
|
| 31 |
+
int init_method,
|
| 32 |
+
bool do_log,
|
| 33 |
+
bool time_kernel,
|
| 34 |
+
const std::vector<int>& Ms,
|
| 35 |
+
const std::vector<int>& Ns,
|
| 36 |
+
const std::vector<int>& Ks,
|
| 37 |
+
const std::vector<int>& StrideAs,
|
| 38 |
+
const std::vector<int>& StrideBs,
|
| 39 |
+
const std::vector<int>& StrideCs)
|
| 40 |
+
{
|
| 41 |
+
|
| 42 |
+
bool pass = true;
|
| 43 |
+
|
| 44 |
+
auto f_host_tensor_descriptor =
|
| 45 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 46 |
+
using namespace ck::literals;
|
| 47 |
+
|
| 48 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 49 |
+
{
|
| 50 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 51 |
+
}
|
| 52 |
+
else
|
| 53 |
+
{
|
| 54 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 55 |
+
}
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
std::size_t group_count = Ms.size();
|
| 59 |
+
|
| 60 |
+
if(!(group_count == Ns.size() && group_count == Ks.size() && group_count == StrideAs.size() &&
|
| 61 |
+
group_count == StrideBs.size() && group_count == StrideCs.size()))
|
| 62 |
+
{
|
| 63 |
+
throw std::runtime_error("wrong! inconsistent M/N/Ks, StrideA/B/Cs size\n");
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
std::vector<Tensor<ADataType>> a_m_k;
|
| 67 |
+
std::vector<Tensor<BDataType>> b_k_n;
|
| 68 |
+
std::vector<Tensor<CDataType>> c_m_n_device_results;
|
| 69 |
+
|
| 70 |
+
for(std::size_t i = 0; i < group_count; i++)
|
| 71 |
+
{
|
| 72 |
+
a_m_k.push_back(
|
| 73 |
+
Tensor<ADataType>(f_host_tensor_descriptor(Ms[i], Ks[i], StrideAs[i], ALayout{})));
|
| 74 |
+
b_k_n.push_back(
|
| 75 |
+
Tensor<BDataType>(f_host_tensor_descriptor(Ks[i], Ns[i], StrideBs[i], BLayout{})));
|
| 76 |
+
|
| 77 |
+
c_m_n_device_results.push_back(
|
| 78 |
+
Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));
|
| 79 |
+
|
| 80 |
+
std::cout << "group: " << i << " a_m_k[" << i << "]:" << a_m_k[i].mDesc << ", b_k_n[" << i
|
| 81 |
+
<< "]:" << b_k_n[i].mDesc << ", c_m_n_device_results[" << i
|
| 82 |
+
<< "]:" << c_m_n_device_results[i].mDesc << std::endl;
|
| 83 |
+
|
| 84 |
+
switch(init_method)
|
| 85 |
+
{
|
| 86 |
+
case 0: break;
|
| 87 |
+
case 1:
|
| 88 |
+
ck::utils::FillUniformDistributionIntegerValue<ADataType>{}(a_m_k[i]);
|
| 89 |
+
ck::utils::FillUniformDistributionIntegerValue<BDataType>{}(b_k_n[i]);
|
| 90 |
+
break;
|
| 91 |
+
default:
|
| 92 |
+
ck::utils::FillUniformDistribution<ADataType>{0.0, 1.0}(a_m_k[i]);
|
| 93 |
+
ck::utils::FillUniformDistribution<BDataType>{-0.5, 0.5}(b_k_n[i]);
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
ck::utils::FillConstant<CDataType>{}(c_m_n_device_results[i]);
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 100 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 101 |
+
using CElementOp = ck::tensor_operation::element_wise::FastGelu;
|
| 102 |
+
|
| 103 |
+
const auto a_element_op = AElementOp{};
|
| 104 |
+
const auto b_element_op = BElementOp{};
|
| 105 |
+
const auto c_element_op = CElementOp{};
|
| 106 |
+
|
| 107 |
+
using DeviceMemPtr = std::unique_ptr<DeviceMem>;
|
| 108 |
+
std::vector<DeviceMemPtr> a_device_buf, b_device_buf, c_device_buf;
|
| 109 |
+
|
| 110 |
+
a_device_buf.reserve(group_count);
|
| 111 |
+
b_device_buf.reserve(group_count);
|
| 112 |
+
c_device_buf.reserve(group_count);
|
| 113 |
+
|
| 114 |
+
std::vector<const void*> p_a, p_b;
|
| 115 |
+
std::vector<void*> p_c;
|
| 116 |
+
|
| 117 |
+
p_a.reserve(group_count);
|
| 118 |
+
p_b.reserve(group_count);
|
| 119 |
+
p_c.reserve(group_count);
|
| 120 |
+
|
| 121 |
+
std::vector<ck::tensor_operation::device::GemmDesc> gemm_descs;
|
| 122 |
+
|
| 123 |
+
gemm_descs.reserve(group_count);
|
| 124 |
+
|
| 125 |
+
for(std::size_t i = 0; i < group_count; i++)
|
| 126 |
+
{
|
| 127 |
+
a_device_buf.emplace_back(
|
| 128 |
+
std::make_unique<DeviceMem>(sizeof(ADataType) * a_m_k[i].mDesc.GetElementSpaceSize()));
|
| 129 |
+
b_device_buf.emplace_back(
|
| 130 |
+
std::make_unique<DeviceMem>(sizeof(BDataType) * b_k_n[i].mDesc.GetElementSpaceSize()));
|
| 131 |
+
c_device_buf.emplace_back(std::make_unique<DeviceMem>(
|
| 132 |
+
sizeof(CDataType) * c_m_n_device_results[i].mDesc.GetElementSpaceSize()));
|
| 133 |
+
|
| 134 |
+
a_device_buf[i]->ToDevice(a_m_k[i].mData.data());
|
| 135 |
+
b_device_buf[i]->ToDevice(b_k_n[i].mData.data());
|
| 136 |
+
c_device_buf[i]->SetZero();
|
| 137 |
+
|
| 138 |
+
gemm_descs.push_back({Ms[i], Ns[i], Ks[i], StrideAs[i], StrideBs[i], StrideCs[i], {}});
|
| 139 |
+
|
| 140 |
+
p_a.push_back(a_device_buf[i]->GetDeviceBuffer());
|
| 141 |
+
p_b.push_back(b_device_buf[i]->GetDeviceBuffer());
|
| 142 |
+
p_c.push_back(c_device_buf[i]->GetDeviceBuffer());
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGroupedGemm<ALayout,
|
| 146 |
+
BLayout,
|
| 147 |
+
ck::Tuple<>,
|
| 148 |
+
CLayout,
|
| 149 |
+
ADataType,
|
| 150 |
+
BDataType,
|
| 151 |
+
ck::Tuple<>,
|
| 152 |
+
CDataType,
|
| 153 |
+
AElementOp,
|
| 154 |
+
BElementOp,
|
| 155 |
+
CElementOp>;
|
| 156 |
+
|
| 157 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 158 |
+
DeviceOp>::GetInstances();
|
| 159 |
+
|
| 160 |
+
if(op_ptrs.size() <= 0)
|
| 161 |
+
{
|
| 162 |
+
throw std::runtime_error("wrong! no device GEMM instance found");
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
std::string best_gemm_name;
|
| 166 |
+
float best_ave_time = 0;
|
| 167 |
+
float best_tflops = 0;
|
| 168 |
+
float best_gb_per_sec = 0;
|
| 169 |
+
|
| 170 |
+
auto p_ds = std::vector<std::array<const void*, 0>>{};
|
| 171 |
+
|
| 172 |
+
// profile device GEMM instances
|
| 173 |
+
for(auto& gemm_ptr : op_ptrs)
|
| 174 |
+
{
|
| 175 |
+
auto argument_ptr = gemm_ptr->MakeArgumentPointer(
|
| 176 |
+
p_a, p_b, p_ds, p_c, gemm_descs, a_element_op, b_element_op, c_element_op);
|
| 177 |
+
|
| 178 |
+
auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
|
| 179 |
+
DeviceMem gemm_desc_workspace(gemm_ptr->GetWorkSpaceSize(argument_ptr.get()));
|
| 180 |
+
gemm_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_desc_workspace.GetDeviceBuffer());
|
| 181 |
+
|
| 182 |
+
if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 183 |
+
{
|
| 184 |
+
std::string gemm_name = gemm_ptr->GetTypeString();
|
| 185 |
+
|
| 186 |
+
float ave_time =
|
| 187 |
+
invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 188 |
+
|
| 189 |
+
std::size_t flop = 0, num_btype = 0;
|
| 190 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 191 |
+
{
|
| 192 |
+
flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i];
|
| 193 |
+
num_btype += sizeof(ADataType) * Ms[i] * Ks[i] + sizeof(BDataType) * Ks[i] * Ns[i] +
|
| 194 |
+
sizeof(CDataType) * Ms[i] * Ns[i];
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 198 |
+
|
| 199 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 200 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops << " TFlops, "
|
| 201 |
+
<< gb_per_sec << " GB/s, " << gemm_name << std::endl;
|
| 202 |
+
|
| 203 |
+
if(tflops > best_tflops)
|
| 204 |
+
{
|
| 205 |
+
best_gemm_name = gemm_name;
|
| 206 |
+
best_tflops = tflops;
|
| 207 |
+
best_ave_time = ave_time;
|
| 208 |
+
best_gb_per_sec = gb_per_sec;
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
if(do_verification)
|
| 212 |
+
{
|
| 213 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 214 |
+
{
|
| 215 |
+
|
| 216 |
+
c_device_buf[i]->FromDevice(c_m_n_device_results[i].mData.data());
|
| 217 |
+
Tensor<CDataType> c_m_n_host_result(
|
| 218 |
+
f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{}));
|
| 219 |
+
|
| 220 |
+
using ReferenceGemmInstance =
|
| 221 |
+
ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 222 |
+
BDataType,
|
| 223 |
+
CDataType,
|
| 224 |
+
AccDataType,
|
| 225 |
+
AElementOp,
|
| 226 |
+
BElementOp,
|
| 227 |
+
CElementOp>;
|
| 228 |
+
|
| 229 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 230 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 231 |
+
auto ref_argument = ref_gemm.MakeArgument(a_m_k[i],
|
| 232 |
+
b_k_n[i],
|
| 233 |
+
c_m_n_host_result,
|
| 234 |
+
a_element_op,
|
| 235 |
+
b_element_op,
|
| 236 |
+
c_element_op);
|
| 237 |
+
|
| 238 |
+
ref_invoker.Run(ref_argument);
|
| 239 |
+
|
| 240 |
+
bool group_pass =
|
| 241 |
+
ck::utils::check_err(c_m_n_device_results[i], c_m_n_host_result);
|
| 242 |
+
pass = pass && group_pass;
|
| 243 |
+
|
| 244 |
+
std::cout << "group: " << i << " verification result: " << std::boolalpha
|
| 245 |
+
<< group_pass << std::endl;
|
| 246 |
+
|
| 247 |
+
if(do_log)
|
| 248 |
+
{
|
| 249 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k[i].mData, ",")
|
| 250 |
+
<< std::endl;
|
| 251 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n[i].mData, ",") << std::endl;
|
| 252 |
+
LogRangeAsType<float>(
|
| 253 |
+
std::cout << "c_device: ", c_m_n_device_results[i].mData, ",")
|
| 254 |
+
<< std::endl;
|
| 255 |
+
LogRangeAsType<float>(
|
| 256 |
+
std::cout << "c_host : ", c_m_n_host_result.mData, ",")
|
| 257 |
+
<< std::endl;
|
| 258 |
+
}
|
| 259 |
+
}
|
| 260 |
+
}
|
| 261 |
+
}
|
| 262 |
+
else
|
| 263 |
+
{
|
| 264 |
+
std::cout << "does not support this GEMM problem" << std::endl;
|
| 265 |
+
}
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
if(do_verification)
|
| 269 |
+
{
|
| 270 |
+
std::cout << "Verification: " << (pass ? "SUCCESS" : "FAILURE") << std::endl;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 274 |
+
<< best_gb_per_sec << " GB/s, " << best_gemm_name << std::endl;
|
| 275 |
+
|
| 276 |
+
return pass;
|
| 277 |
+
}
|
| 278 |
+
|
| 279 |
+
} // namespace profiler
|
| 280 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_gemm_fixed_nk_impl.hpp
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/utility/env.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/device_grouped_gemm_fixed_nk.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 13 |
+
|
| 14 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_gemm_fixed_nk.hpp"
|
| 15 |
+
|
| 16 |
+
#include "ck/library/utility/check_err.hpp"
|
| 17 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 18 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 19 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/utility/fill.hpp"
|
| 23 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 24 |
+
|
| 25 |
+
namespace ck {
|
| 26 |
+
namespace profiler {
|
| 27 |
+
|
| 28 |
+
template <typename ADataType,
|
| 29 |
+
typename BDataType,
|
| 30 |
+
typename CDataType,
|
| 31 |
+
typename AccDataType,
|
| 32 |
+
typename ALayout,
|
| 33 |
+
typename BLayout,
|
| 34 |
+
typename CLayout>
|
| 35 |
+
bool profile_grouped_gemm_fixed_nk_impl(int do_verification,
|
| 36 |
+
int init_method,
|
| 37 |
+
bool do_log,
|
| 38 |
+
bool time_kernel,
|
| 39 |
+
const std::vector<int>& Ms,
|
| 40 |
+
const std::vector<int>& Ns,
|
| 41 |
+
const std::vector<int>& Ks,
|
| 42 |
+
const std::vector<int>& StrideAs,
|
| 43 |
+
const std::vector<int>& StrideBs,
|
| 44 |
+
const std::vector<int>& StrideCs,
|
| 45 |
+
int kbatch = 1,
|
| 46 |
+
int n_warmup = 1,
|
| 47 |
+
int n_iter = 10)
|
| 48 |
+
{
|
| 49 |
+
bool pass = true;
|
| 50 |
+
|
| 51 |
+
auto f_host_tensor_descriptor =
|
| 52 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 53 |
+
using namespace ck::literals;
|
| 54 |
+
|
| 55 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 56 |
+
{
|
| 57 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 58 |
+
}
|
| 59 |
+
else
|
| 60 |
+
{
|
| 61 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 62 |
+
}
|
| 63 |
+
};
|
| 64 |
+
|
| 65 |
+
std::size_t group_count = Ms.size();
|
| 66 |
+
|
| 67 |
+
if(!(group_count == Ns.size() && group_count == Ks.size() && group_count == StrideAs.size() &&
|
| 68 |
+
group_count == StrideBs.size() && group_count == StrideCs.size()))
|
| 69 |
+
{
|
| 70 |
+
throw std::runtime_error("wrong! inconsistent M/N/Ks, StrideA/B/Cs size\n");
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
std::vector<Tensor<ADataType>> a_m_k;
|
| 74 |
+
std::vector<Tensor<BDataType>> b_k_n;
|
| 75 |
+
std::vector<Tensor<CDataType>> c_m_n_host_results;
|
| 76 |
+
std::vector<Tensor<CDataType>> c_m_n_device_results;
|
| 77 |
+
int sum_of_m = 0;
|
| 78 |
+
|
| 79 |
+
for(std::size_t i = 0; i < group_count; i++)
|
| 80 |
+
{
|
| 81 |
+
sum_of_m += Ms[i];
|
| 82 |
+
a_m_k.push_back(
|
| 83 |
+
Tensor<ADataType>(f_host_tensor_descriptor(Ms[i], Ks[i], StrideAs[i], ALayout{})));
|
| 84 |
+
b_k_n.push_back(
|
| 85 |
+
Tensor<BDataType>(f_host_tensor_descriptor(Ks[i], Ns[i], StrideBs[i], BLayout{})));
|
| 86 |
+
|
| 87 |
+
c_m_n_device_results.push_back(
|
| 88 |
+
Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));
|
| 89 |
+
|
| 90 |
+
c_m_n_host_results.push_back(
|
| 91 |
+
Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));
|
| 92 |
+
if(ck::EnvIsEnabled(CK_ENV(CK_LOGGING)))
|
| 93 |
+
{
|
| 94 |
+
std::cout << "group: " << i << " a_m_k[" << i << "]:" << a_m_k[i].mDesc << ", b_k_n["
|
| 95 |
+
<< i << "]:" << b_k_n[i].mDesc << ", c_m_n_device_results[" << i
|
| 96 |
+
<< "]:" << c_m_n_device_results[i].mDesc << std::endl;
|
| 97 |
+
}
|
| 98 |
+
std::size_t num_thread = 1;
|
| 99 |
+
switch(init_method)
|
| 100 |
+
{
|
| 101 |
+
case 0: break;
|
| 102 |
+
case 1:
|
| 103 |
+
a_m_k[i].GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5}, num_thread);
|
| 104 |
+
b_k_n[i].GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5}, num_thread);
|
| 105 |
+
break;
|
| 106 |
+
default:
|
| 107 |
+
a_m_k[i].GenerateTensorValue(GeneratorTensor_3<ADataType>{0.0, 1.0}, num_thread);
|
| 108 |
+
b_k_n[i].GenerateTensorValue(GeneratorTensor_3<BDataType>{-0.5, 0.5}, num_thread);
|
| 109 |
+
}
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 113 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 114 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 115 |
+
|
| 116 |
+
const auto a_element_op = AElementOp{};
|
| 117 |
+
const auto b_element_op = BElementOp{};
|
| 118 |
+
const auto c_element_op = CElementOp{};
|
| 119 |
+
|
| 120 |
+
using DeviceMemPtr = std::unique_ptr<DeviceMem>;
|
| 121 |
+
std::vector<DeviceMemPtr> a_device_buf, b_device_buf, c_device_buf;
|
| 122 |
+
|
| 123 |
+
a_device_buf.reserve(group_count);
|
| 124 |
+
b_device_buf.reserve(group_count);
|
| 125 |
+
c_device_buf.reserve(group_count);
|
| 126 |
+
|
| 127 |
+
std::vector<const void*> p_a, p_b;
|
| 128 |
+
std::vector<void*> p_c;
|
| 129 |
+
|
| 130 |
+
p_a.reserve(group_count);
|
| 131 |
+
p_b.reserve(group_count);
|
| 132 |
+
p_c.reserve(group_count);
|
| 133 |
+
|
| 134 |
+
std::vector<ck::tensor_operation::device::GemmDesc> gemm_descs;
|
| 135 |
+
gemm_descs.reserve(group_count);
|
| 136 |
+
|
| 137 |
+
std::vector<ck::tensor_operation::device::GroupedGemmKernelArgument<1>>
|
| 138 |
+
grouped_gemm_kernel_args_;
|
| 139 |
+
grouped_gemm_kernel_args_.reserve(group_count);
|
| 140 |
+
|
| 141 |
+
for(std::size_t i = 0; i < group_count; i++)
|
| 142 |
+
{
|
| 143 |
+
a_device_buf.emplace_back(
|
| 144 |
+
std::make_unique<DeviceMem>(sizeof(ADataType) * a_m_k[i].mDesc.GetElementSpaceSize()));
|
| 145 |
+
b_device_buf.emplace_back(
|
| 146 |
+
std::make_unique<DeviceMem>(sizeof(BDataType) * b_k_n[i].mDesc.GetElementSpaceSize()));
|
| 147 |
+
c_device_buf.emplace_back(std::make_unique<DeviceMem>(
|
| 148 |
+
sizeof(CDataType) * c_m_n_device_results[i].mDesc.GetElementSpaceSize()));
|
| 149 |
+
|
| 150 |
+
a_device_buf[i]->ToDevice(a_m_k[i].mData.data());
|
| 151 |
+
b_device_buf[i]->ToDevice(b_k_n[i].mData.data());
|
| 152 |
+
|
| 153 |
+
gemm_descs.push_back({sum_of_m, Ns[i], Ks[i], StrideAs[i], StrideBs[i], StrideCs[i], {}});
|
| 154 |
+
|
| 155 |
+
p_a.push_back(a_device_buf[i]->GetDeviceBuffer());
|
| 156 |
+
p_b.push_back(b_device_buf[i]->GetDeviceBuffer());
|
| 157 |
+
p_c.push_back(c_device_buf[i]->GetDeviceBuffer());
|
| 158 |
+
|
| 159 |
+
grouped_gemm_kernel_args_.push_back({a_device_buf[i]->GetDeviceBuffer(),
|
| 160 |
+
b_device_buf[i]->GetDeviceBuffer(),
|
| 161 |
+
{},
|
| 162 |
+
c_device_buf[i]->GetDeviceBuffer(),
|
| 163 |
+
Ms[i],
|
| 164 |
+
Ns[i],
|
| 165 |
+
Ks[i],
|
| 166 |
+
StrideAs[i],
|
| 167 |
+
StrideBs[i],
|
| 168 |
+
{},
|
| 169 |
+
StrideCs[i]});
|
| 170 |
+
}
|
| 171 |
+
|
| 172 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGroupedGemmFixedNK<ALayout,
|
| 173 |
+
BLayout,
|
| 174 |
+
ck::Tuple<>,
|
| 175 |
+
CLayout,
|
| 176 |
+
ADataType,
|
| 177 |
+
BDataType,
|
| 178 |
+
ck::Tuple<>,
|
| 179 |
+
CDataType,
|
| 180 |
+
AElementOp,
|
| 181 |
+
BElementOp,
|
| 182 |
+
CElementOp>;
|
| 183 |
+
|
| 184 |
+
const auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 185 |
+
DeviceOp>::GetInstances();
|
| 186 |
+
|
| 187 |
+
if(op_ptrs.size() <= 0)
|
| 188 |
+
{
|
| 189 |
+
throw std::runtime_error("wrong! no device GEMM instance found");
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
std::string best_gemm_name;
|
| 193 |
+
float best_ave_time = 0;
|
| 194 |
+
float best_tflops = 0;
|
| 195 |
+
float best_gb_per_sec = 0;
|
| 196 |
+
float best_kbatch = 0;
|
| 197 |
+
|
| 198 |
+
auto p_ds = std::vector<std::array<const void*, 0>>{};
|
| 199 |
+
|
| 200 |
+
if(do_verification)
|
| 201 |
+
{
|
| 202 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 203 |
+
{
|
| 204 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 205 |
+
BDataType,
|
| 206 |
+
CDataType,
|
| 207 |
+
AccDataType,
|
| 208 |
+
AElementOp,
|
| 209 |
+
BElementOp,
|
| 210 |
+
CElementOp>;
|
| 211 |
+
|
| 212 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 213 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 214 |
+
|
| 215 |
+
auto ref_argument = ref_gemm.MakeArgument(a_m_k[i],
|
| 216 |
+
b_k_n[i],
|
| 217 |
+
c_m_n_host_results[i],
|
| 218 |
+
a_element_op,
|
| 219 |
+
b_element_op,
|
| 220 |
+
c_element_op);
|
| 221 |
+
|
| 222 |
+
ref_invoker.Run(ref_argument);
|
| 223 |
+
}
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
// profile device GEMM instances
|
| 227 |
+
for(auto& gemm_ptr : op_ptrs)
|
| 228 |
+
{
|
| 229 |
+
auto argument_ptr =
|
| 230 |
+
gemm_ptr->MakeArgumentPointer(p_a,
|
| 231 |
+
p_b,
|
| 232 |
+
p_ds,
|
| 233 |
+
p_c,
|
| 234 |
+
gemm_descs,
|
| 235 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 236 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 237 |
+
ck::tensor_operation::element_wise::PassThrough{});
|
| 238 |
+
|
| 239 |
+
auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
|
| 240 |
+
|
| 241 |
+
DeviceMem gemm_desc_workspace(gemm_ptr->GetWorkSpaceSize(argument_ptr.get()));
|
| 242 |
+
|
| 243 |
+
DeviceMem grouped_gemm_kernel_args_dev(
|
| 244 |
+
gemm_ptr->GetDeviceKernelArgSize(argument_ptr.get()));
|
| 245 |
+
|
| 246 |
+
hipGetErrorString(hipMemcpy(grouped_gemm_kernel_args_dev.GetDeviceBuffer(),
|
| 247 |
+
grouped_gemm_kernel_args_.data(),
|
| 248 |
+
gemm_ptr->GetDeviceKernelArgSize(argument_ptr.get()),
|
| 249 |
+
hipMemcpyHostToDevice));
|
| 250 |
+
|
| 251 |
+
gemm_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_desc_workspace.GetDeviceBuffer());
|
| 252 |
+
|
| 253 |
+
gemm_ptr->SetDeviceKernelArgs(argument_ptr.get(),
|
| 254 |
+
grouped_gemm_kernel_args_dev.GetDeviceBuffer());
|
| 255 |
+
|
| 256 |
+
std::string gemm_name = gemm_ptr->GetTypeString();
|
| 257 |
+
|
| 258 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 12, 16, 20, 24, 32, 48, 64};
|
| 259 |
+
|
| 260 |
+
if(kbatch > 0)
|
| 261 |
+
{
|
| 262 |
+
kbatch_list = {kbatch};
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
for(std::size_t j = 0; j < kbatch_list.size(); j++)
|
| 266 |
+
{
|
| 267 |
+
|
| 268 |
+
auto kbatch_curr = kbatch_list[j];
|
| 269 |
+
|
| 270 |
+
gemm_ptr->SetKBatch(argument_ptr.get(), kbatch_curr);
|
| 271 |
+
|
| 272 |
+
if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 273 |
+
{
|
| 274 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 275 |
+
c_device_buf[i]->SetZero();
|
| 276 |
+
|
| 277 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 278 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 279 |
+
|
| 280 |
+
if(do_verification)
|
| 281 |
+
{
|
| 282 |
+
bool instance_pass = true;
|
| 283 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 284 |
+
{
|
| 285 |
+
|
| 286 |
+
c_device_buf[i]->FromDevice(c_m_n_device_results[i].mData.data());
|
| 287 |
+
|
| 288 |
+
if(std::is_same_v<CDataType, ck::half_t> && kbatch_curr > 1)
|
| 289 |
+
{
|
| 290 |
+
instance_pass =
|
| 291 |
+
instance_pass && ck::utils::check_err(c_m_n_device_results[i],
|
| 292 |
+
c_m_n_host_results[i],
|
| 293 |
+
"Error: Incorrect results!",
|
| 294 |
+
0.06);
|
| 295 |
+
}
|
| 296 |
+
else
|
| 297 |
+
{
|
| 298 |
+
instance_pass =
|
| 299 |
+
instance_pass && ck::utils::check_err(c_m_n_device_results[i],
|
| 300 |
+
c_m_n_host_results[i]);
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
if(do_log)
|
| 304 |
+
{
|
| 305 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k[i].mData, ",")
|
| 306 |
+
<< std::endl;
|
| 307 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n[i].mData, ",")
|
| 308 |
+
<< std::endl;
|
| 309 |
+
LogRangeAsType<float>(
|
| 310 |
+
std::cout << "c_device: ", c_m_n_device_results[i].mData, ",")
|
| 311 |
+
<< std::endl;
|
| 312 |
+
LogRangeAsType<float>(
|
| 313 |
+
std::cout << "c_host : ", c_m_n_host_results[i].mData, ",")
|
| 314 |
+
<< std::endl;
|
| 315 |
+
}
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
std::cout << "Instance: " << gemm_name << " verification "
|
| 319 |
+
<< (instance_pass ? "SUCCEED" : "FAILED") << std::endl;
|
| 320 |
+
|
| 321 |
+
pass = pass && instance_pass;
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
float ave_time = invoker_ptr->Run(
|
| 325 |
+
argument_ptr.get(), StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});
|
| 326 |
+
|
| 327 |
+
if(time_kernel)
|
| 328 |
+
{
|
| 329 |
+
std::size_t flop = 0, num_btype = 0;
|
| 330 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 331 |
+
{
|
| 332 |
+
flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i];
|
| 333 |
+
|
| 334 |
+
num_btype += sizeof(ADataType) * Ms[i] * Ks[i] +
|
| 335 |
+
sizeof(BDataType) * Ks[i] * Ns[i] +
|
| 336 |
+
sizeof(CDataType) * Ms[i] * Ns[i];
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 340 |
+
|
| 341 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 342 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 343 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << gemm_name << ", KBatch "
|
| 344 |
+
<< kbatch_curr << std::endl;
|
| 345 |
+
|
| 346 |
+
if(tflops > best_tflops)
|
| 347 |
+
{
|
| 348 |
+
best_gemm_name = gemm_name;
|
| 349 |
+
best_tflops = tflops;
|
| 350 |
+
best_ave_time = ave_time;
|
| 351 |
+
best_gb_per_sec = gb_per_sec;
|
| 352 |
+
best_kbatch = kbatch_curr;
|
| 353 |
+
}
|
| 354 |
+
}
|
| 355 |
+
}
|
| 356 |
+
else
|
| 357 |
+
{
|
| 358 |
+
std::cout << "Instance: " << gemm_name << ", does not support this GEMM problem"
|
| 359 |
+
<< std::endl;
|
| 360 |
+
}
|
| 361 |
+
}
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
if(time_kernel)
|
| 365 |
+
{
|
| 366 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 367 |
+
<< best_gb_per_sec << " GB/s, " << best_gemm_name << ", KBatch = " << best_kbatch
|
| 368 |
+
<< std::endl;
|
| 369 |
+
}
|
| 370 |
+
return pass;
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
} // namespace profiler
|
| 374 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_grouped_gemm_impl.hpp
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
#include "ck/utility/env.hpp"
|
| 10 |
+
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
|
| 11 |
+
#include "ck/tensor_operation/gpu/device/device_grouped_gemm.hpp"
|
| 12 |
+
#include "ck/tensor_operation/gpu/device/device_grouped_gemm_splitk.hpp"
|
| 13 |
+
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
|
| 14 |
+
|
| 15 |
+
#include "ck/library/tensor_operation_instance/gpu/grouped_gemm.hpp"
|
| 16 |
+
|
| 17 |
+
#include "ck/library/utility/check_err.hpp"
|
| 18 |
+
#include "ck/library/utility/convolution_parameter.hpp"
|
| 19 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 20 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 21 |
+
#include "ck/library/utility/literals.hpp"
|
| 22 |
+
#include "ck/library/utility/fill.hpp"
|
| 23 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_gemm.hpp"
|
| 24 |
+
|
| 25 |
+
namespace ck {
|
| 26 |
+
namespace profiler {
|
| 27 |
+
|
| 28 |
+
template <typename ADataType,
|
| 29 |
+
typename BDataType,
|
| 30 |
+
typename CDataType,
|
| 31 |
+
typename AccDataType,
|
| 32 |
+
typename ALayout,
|
| 33 |
+
typename BLayout,
|
| 34 |
+
typename CLayout>
|
| 35 |
+
bool profile_grouped_gemm_impl(int do_verification,
|
| 36 |
+
int init_method,
|
| 37 |
+
bool do_log,
|
| 38 |
+
bool time_kernel,
|
| 39 |
+
const std::vector<int>& Ms,
|
| 40 |
+
const std::vector<int>& Ns,
|
| 41 |
+
const std::vector<int>& Ks,
|
| 42 |
+
const std::vector<int>& StrideAs,
|
| 43 |
+
const std::vector<int>& StrideBs,
|
| 44 |
+
const std::vector<int>& StrideCs,
|
| 45 |
+
const std::vector<int>& kbatches = {},
|
| 46 |
+
int n_warmup = 1,
|
| 47 |
+
int n_iter = 10)
|
| 48 |
+
{
|
| 49 |
+
bool pass = true;
|
| 50 |
+
// TODO: Fixme - we do not pass compute data type here but need it
|
| 51 |
+
// to compute error thresholds.
|
| 52 |
+
using ComputeDataType = ADataType;
|
| 53 |
+
|
| 54 |
+
auto f_host_tensor_descriptor =
|
| 55 |
+
[](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
|
| 56 |
+
using namespace ck::literals;
|
| 57 |
+
|
| 58 |
+
if(is_same<decltype(layout), tensor_layout::gemm::RowMajor>::value)
|
| 59 |
+
{
|
| 60 |
+
return HostTensorDescriptor({row, col}, {stride, 1_uz});
|
| 61 |
+
}
|
| 62 |
+
else
|
| 63 |
+
{
|
| 64 |
+
return HostTensorDescriptor({row, col}, {1_uz, stride});
|
| 65 |
+
}
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
std::size_t group_count = Ms.size();
|
| 69 |
+
|
| 70 |
+
if(!(group_count == Ns.size() && group_count == Ks.size() && group_count == StrideAs.size() &&
|
| 71 |
+
group_count == StrideBs.size() && group_count == StrideCs.size()))
|
| 72 |
+
{
|
| 73 |
+
throw std::runtime_error("wrong! inconsistent M/N/Ks, StrideA/B/Cs size\n");
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
std::vector<Tensor<ADataType>> a_m_k;
|
| 77 |
+
std::vector<Tensor<BDataType>> b_k_n;
|
| 78 |
+
std::vector<Tensor<CDataType>> c_m_n_host_results;
|
| 79 |
+
std::vector<Tensor<CDataType>> c_m_n_device_results;
|
| 80 |
+
|
| 81 |
+
double max_abs_in_val = 0.f;
|
| 82 |
+
for(std::size_t i = 0; i < group_count; i++)
|
| 83 |
+
{
|
| 84 |
+
a_m_k.push_back(
|
| 85 |
+
Tensor<ADataType>(f_host_tensor_descriptor(Ms[i], Ks[i], StrideAs[i], ALayout{})));
|
| 86 |
+
b_k_n.push_back(
|
| 87 |
+
Tensor<BDataType>(f_host_tensor_descriptor(Ks[i], Ns[i], StrideBs[i], BLayout{})));
|
| 88 |
+
|
| 89 |
+
c_m_n_device_results.push_back(
|
| 90 |
+
Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));
|
| 91 |
+
|
| 92 |
+
c_m_n_host_results.push_back(
|
| 93 |
+
Tensor<CDataType>(f_host_tensor_descriptor(Ms[i], Ns[i], StrideCs[i], CLayout{})));
|
| 94 |
+
if(ck::EnvIsEnabled(CK_ENV(CK_LOGGING)))
|
| 95 |
+
{
|
| 96 |
+
std::cout << "group: " << i << " a_m_k[" << i << "]:" << a_m_k[i].mDesc << ", b_k_n["
|
| 97 |
+
<< i << "]:" << b_k_n[i].mDesc << ", c_m_n_device_results[" << i
|
| 98 |
+
<< "]:" << c_m_n_device_results[i].mDesc << std::endl;
|
| 99 |
+
}
|
| 100 |
+
switch(init_method)
|
| 101 |
+
{
|
| 102 |
+
case 0: break;
|
| 103 |
+
case 1:
|
| 104 |
+
ck::utils::FillUniformDistributionIntegerValue<ADataType>{-2.f, 2.f}(a_m_k[i]);
|
| 105 |
+
ck::utils::FillUniformDistributionIntegerValue<BDataType>{-2.f, 2.f}(b_k_n[i]);
|
| 106 |
+
max_abs_in_val = 2.f;
|
| 107 |
+
break;
|
| 108 |
+
default:
|
| 109 |
+
ck::utils::FillUniformDistribution<ADataType>{-0.5f, 0.5f}(a_m_k[i]);
|
| 110 |
+
ck::utils::FillUniformDistribution<BDataType>{-0.5f, 0.5f}(b_k_n[i]);
|
| 111 |
+
max_abs_in_val = 0.5f;
|
| 112 |
+
}
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
using AElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 116 |
+
using BElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 117 |
+
using CElementOp = ck::tensor_operation::element_wise::PassThrough;
|
| 118 |
+
|
| 119 |
+
const auto a_element_op = AElementOp{};
|
| 120 |
+
const auto b_element_op = BElementOp{};
|
| 121 |
+
const auto c_element_op = CElementOp{};
|
| 122 |
+
|
| 123 |
+
using DeviceMemPtr = std::unique_ptr<DeviceMem>;
|
| 124 |
+
std::vector<DeviceMemPtr> a_device_buf, b_device_buf, c_device_buf;
|
| 125 |
+
|
| 126 |
+
a_device_buf.reserve(group_count);
|
| 127 |
+
b_device_buf.reserve(group_count);
|
| 128 |
+
c_device_buf.reserve(group_count);
|
| 129 |
+
|
| 130 |
+
std::vector<const void*> p_a, p_b;
|
| 131 |
+
std::vector<void*> p_c;
|
| 132 |
+
|
| 133 |
+
p_a.reserve(group_count);
|
| 134 |
+
p_b.reserve(group_count);
|
| 135 |
+
p_c.reserve(group_count);
|
| 136 |
+
|
| 137 |
+
std::vector<ck::tensor_operation::device::GemmDesc> gemm_descs;
|
| 138 |
+
|
| 139 |
+
gemm_descs.reserve(group_count);
|
| 140 |
+
|
| 141 |
+
for(std::size_t i = 0; i < group_count; i++)
|
| 142 |
+
{
|
| 143 |
+
a_device_buf.emplace_back(
|
| 144 |
+
std::make_unique<DeviceMem>(sizeof(ADataType) * a_m_k[i].mDesc.GetElementSpaceSize()));
|
| 145 |
+
b_device_buf.emplace_back(
|
| 146 |
+
std::make_unique<DeviceMem>(sizeof(BDataType) * b_k_n[i].mDesc.GetElementSpaceSize()));
|
| 147 |
+
c_device_buf.emplace_back(std::make_unique<DeviceMem>(
|
| 148 |
+
sizeof(CDataType) * c_m_n_device_results[i].mDesc.GetElementSpaceSize()));
|
| 149 |
+
|
| 150 |
+
a_device_buf[i]->ToDevice(a_m_k[i].mData.data());
|
| 151 |
+
b_device_buf[i]->ToDevice(b_k_n[i].mData.data());
|
| 152 |
+
|
| 153 |
+
gemm_descs.push_back({Ms[i], Ns[i], Ks[i], StrideAs[i], StrideBs[i], StrideCs[i], {}});
|
| 154 |
+
|
| 155 |
+
p_a.push_back(a_device_buf[i]->GetDeviceBuffer());
|
| 156 |
+
p_b.push_back(b_device_buf[i]->GetDeviceBuffer());
|
| 157 |
+
p_c.push_back(c_device_buf[i]->GetDeviceBuffer());
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
using DeviceOp = ck::tensor_operation::device::DeviceGroupedGemm<ALayout,
|
| 161 |
+
BLayout,
|
| 162 |
+
ck::Tuple<>,
|
| 163 |
+
CLayout,
|
| 164 |
+
ADataType,
|
| 165 |
+
BDataType,
|
| 166 |
+
ck::Tuple<>,
|
| 167 |
+
CDataType,
|
| 168 |
+
AElementOp,
|
| 169 |
+
BElementOp,
|
| 170 |
+
CElementOp>;
|
| 171 |
+
|
| 172 |
+
// If kbatch would be bigger than 1, then we will use SplitK version.
|
| 173 |
+
using DeviceOpSplitK = ck::tensor_operation::device::DeviceGroupedGemmSplitK<ALayout,
|
| 174 |
+
BLayout,
|
| 175 |
+
ck::Tuple<>,
|
| 176 |
+
CLayout,
|
| 177 |
+
ADataType,
|
| 178 |
+
BDataType,
|
| 179 |
+
ck::Tuple<>,
|
| 180 |
+
CDataType,
|
| 181 |
+
AElementOp,
|
| 182 |
+
BElementOp,
|
| 183 |
+
CElementOp>;
|
| 184 |
+
|
| 185 |
+
auto op_ptrs = ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 186 |
+
DeviceOp>::GetInstances();
|
| 187 |
+
|
| 188 |
+
if(op_ptrs.size() <= 0)
|
| 189 |
+
{
|
| 190 |
+
throw std::runtime_error("wrong! no device GEMM instance found");
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
std::string best_gemm_name;
|
| 194 |
+
float best_ave_time = 0;
|
| 195 |
+
float best_tflops = 0;
|
| 196 |
+
float best_gb_per_sec = 0;
|
| 197 |
+
float best_kbatch = 0;
|
| 198 |
+
|
| 199 |
+
auto p_ds = std::vector<std::array<const void*, 0>>{};
|
| 200 |
+
|
| 201 |
+
if(do_verification)
|
| 202 |
+
{
|
| 203 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 204 |
+
{
|
| 205 |
+
using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataType,
|
| 206 |
+
BDataType,
|
| 207 |
+
CDataType,
|
| 208 |
+
AccDataType,
|
| 209 |
+
AElementOp,
|
| 210 |
+
BElementOp,
|
| 211 |
+
CElementOp>;
|
| 212 |
+
|
| 213 |
+
auto ref_gemm = ReferenceGemmInstance{};
|
| 214 |
+
auto ref_invoker = ref_gemm.MakeInvoker();
|
| 215 |
+
|
| 216 |
+
auto ref_argument = ref_gemm.MakeArgument(a_m_k[i],
|
| 217 |
+
b_k_n[i],
|
| 218 |
+
c_m_n_host_results[i],
|
| 219 |
+
a_element_op,
|
| 220 |
+
b_element_op,
|
| 221 |
+
c_element_op);
|
| 222 |
+
|
| 223 |
+
ref_invoker.Run(ref_argument);
|
| 224 |
+
}
|
| 225 |
+
}
|
| 226 |
+
// profile device GEMM instances
|
| 227 |
+
for(auto& gemm_ptr : op_ptrs)
|
| 228 |
+
{
|
| 229 |
+
auto argument_ptr =
|
| 230 |
+
gemm_ptr->MakeArgumentPointer(p_a,
|
| 231 |
+
p_b,
|
| 232 |
+
p_ds,
|
| 233 |
+
p_c,
|
| 234 |
+
gemm_descs,
|
| 235 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 236 |
+
ck::tensor_operation::element_wise::PassThrough{},
|
| 237 |
+
ck::tensor_operation::element_wise::PassThrough{});
|
| 238 |
+
|
| 239 |
+
auto invoker_ptr = gemm_ptr->MakeInvokerPointer();
|
| 240 |
+
|
| 241 |
+
std::size_t workspace_size = gemm_ptr->GetWorkSpaceSize(argument_ptr.get());
|
| 242 |
+
std::size_t kargs_size = gemm_ptr->GetDeviceKernelArgSize(argument_ptr.get());
|
| 243 |
+
|
| 244 |
+
DeviceMem gemm_workspace, gemm_kargs;
|
| 245 |
+
|
| 246 |
+
// The following is necessary since TwoStage kernel is using additional memory both
|
| 247 |
+
// for Workspace and kernel arguments.
|
| 248 |
+
if(kargs_size > 0)
|
| 249 |
+
{
|
| 250 |
+
gemm_kargs.Realloc(kargs_size);
|
| 251 |
+
gemm_ptr->SetDeviceKernelArgs(argument_ptr.get(), gemm_kargs.GetDeviceBuffer());
|
| 252 |
+
}
|
| 253 |
+
if(workspace_size > 0 && workspace_size != kargs_size)
|
| 254 |
+
{
|
| 255 |
+
gemm_workspace.Realloc(workspace_size);
|
| 256 |
+
gemm_ptr->SetWorkSpacePointer(argument_ptr.get(), gemm_workspace.GetDeviceBuffer());
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
std::string gemm_name = gemm_ptr->GetTypeString();
|
| 260 |
+
|
| 261 |
+
std::vector<int> kbatch_list = {1, 2, 4, 8, 12, 16, 20, 24, 32, 48, 64};
|
| 262 |
+
|
| 263 |
+
// If the user will provide not empty kbatches list, then we test predefined set of kbatch
|
| 264 |
+
// values.
|
| 265 |
+
if(!kbatches.empty())
|
| 266 |
+
{
|
| 267 |
+
kbatch_list = kbatches;
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
for(std::size_t j = 0; j < kbatch_list.size(); j++)
|
| 271 |
+
{
|
| 272 |
+
auto kbatch_curr = kbatch_list[j];
|
| 273 |
+
|
| 274 |
+
if(kbatch_curr > 1 && dynamic_cast<DeviceOpSplitK*>(gemm_ptr.get()) != nullptr)
|
| 275 |
+
{
|
| 276 |
+
dynamic_cast<DeviceOpSplitK*>(gemm_ptr.get())
|
| 277 |
+
->SetKBatchSize(argument_ptr.get(), kbatch_curr);
|
| 278 |
+
}
|
| 279 |
+
|
| 280 |
+
if(gemm_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 281 |
+
{
|
| 282 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 283 |
+
c_device_buf[i]->SetZero();
|
| 284 |
+
|
| 285 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 286 |
+
StreamConfig{nullptr, false, 0, n_warmup, n_iter});
|
| 287 |
+
|
| 288 |
+
if(do_verification)
|
| 289 |
+
{
|
| 290 |
+
bool instance_pass = true;
|
| 291 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 292 |
+
{
|
| 293 |
+
c_device_buf[i]->FromDevice(c_m_n_device_results[i].mData.data());
|
| 294 |
+
auto atol = ck::utils::get_absolute_threshold<ComputeDataType, CDataType>(
|
| 295 |
+
max_abs_in_val, gemm_descs[i].K_);
|
| 296 |
+
auto rtol = ck::utils::get_relative_threshold<ComputeDataType, CDataType>(
|
| 297 |
+
gemm_descs[i].K_);
|
| 298 |
+
|
| 299 |
+
instance_pass =
|
| 300 |
+
instance_pass && ck::utils::check_err(c_m_n_device_results[i],
|
| 301 |
+
c_m_n_host_results[i],
|
| 302 |
+
"Error: Incorrect results!",
|
| 303 |
+
rtol,
|
| 304 |
+
atol);
|
| 305 |
+
|
| 306 |
+
if(do_log)
|
| 307 |
+
{
|
| 308 |
+
LogRangeAsType<float>(std::cout << "a : ", a_m_k[i].mData, ",")
|
| 309 |
+
<< std::endl;
|
| 310 |
+
LogRangeAsType<float>(std::cout << "b: ", b_k_n[i].mData, ",")
|
| 311 |
+
<< std::endl;
|
| 312 |
+
LogRangeAsType<float>(
|
| 313 |
+
std::cout << "c_device: ", c_m_n_device_results[i].mData, ",")
|
| 314 |
+
<< std::endl;
|
| 315 |
+
LogRangeAsType<float>(
|
| 316 |
+
std::cout << "c_host : ", c_m_n_host_results[i].mData, ",")
|
| 317 |
+
<< std::endl;
|
| 318 |
+
}
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
std::cout << "Instance: " << gemm_name << " verification "
|
| 322 |
+
<< (instance_pass ? "SUCCEED" : "FAILED") << std::endl;
|
| 323 |
+
|
| 324 |
+
pass = pass && instance_pass;
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
if(time_kernel)
|
| 328 |
+
{
|
| 329 |
+
float ave_time =
|
| 330 |
+
invoker_ptr->Run(argument_ptr.get(),
|
| 331 |
+
StreamConfig{nullptr, time_kernel, 0, n_warmup, n_iter});
|
| 332 |
+
|
| 333 |
+
std::size_t flop = 0, num_btype = 0;
|
| 334 |
+
for(std::size_t i = 0; i < gemm_descs.size(); i++)
|
| 335 |
+
{
|
| 336 |
+
flop += std::size_t(2) * Ms[i] * Ns[i] * Ks[i];
|
| 337 |
+
|
| 338 |
+
num_btype += sizeof(ADataType) * Ms[i] * Ks[i] +
|
| 339 |
+
sizeof(BDataType) * Ks[i] * Ns[i] +
|
| 340 |
+
sizeof(CDataType) * Ms[i] * Ns[i];
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
float tflops = static_cast<float>(flop) / 1.E9 / ave_time;
|
| 344 |
+
|
| 345 |
+
float gb_per_sec = num_btype / 1.E6 / ave_time;
|
| 346 |
+
std::cout << "Perf: " << std::setw(10) << ave_time << " ms, " << tflops
|
| 347 |
+
<< " TFlops, " << gb_per_sec << " GB/s, " << gemm_name << ", KBatch "
|
| 348 |
+
<< kbatch_curr << std::endl;
|
| 349 |
+
|
| 350 |
+
if(tflops > best_tflops)
|
| 351 |
+
{
|
| 352 |
+
best_gemm_name = gemm_name;
|
| 353 |
+
best_tflops = tflops;
|
| 354 |
+
best_ave_time = ave_time;
|
| 355 |
+
best_gb_per_sec = gb_per_sec;
|
| 356 |
+
best_kbatch = kbatch_curr;
|
| 357 |
+
}
|
| 358 |
+
}
|
| 359 |
+
}
|
| 360 |
+
else
|
| 361 |
+
{
|
| 362 |
+
std::cout << "Instance: " << gemm_name << ", does not support this GEMM problem"
|
| 363 |
+
<< std::endl;
|
| 364 |
+
}
|
| 365 |
+
}
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
if(time_kernel)
|
| 369 |
+
{
|
| 370 |
+
std::cout << "Best Perf: " << best_ave_time << " ms, " << best_tflops << " TFlops, "
|
| 371 |
+
<< best_gb_per_sec << " GB/s, " << best_gemm_name << ", KBatch = " << best_kbatch
|
| 372 |
+
<< std::endl;
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
return pass;
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
} // namespace profiler
|
| 379 |
+
} // namespace ck
|
Code/Baselines/flash-attention/csrc/composable_kernel/profiler/include/profiler/profile_groupnorm_fwd_impl.hpp
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// SPDX-License-Identifier: MIT
|
| 2 |
+
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
|
| 3 |
+
|
| 4 |
+
#pragma once
|
| 5 |
+
|
| 6 |
+
#include <iomanip>
|
| 7 |
+
|
| 8 |
+
#include "ck/ck.hpp"
|
| 9 |
+
|
| 10 |
+
#include "ck/library/tensor_operation_instance/gpu/normalization_fwd.hpp"
|
| 11 |
+
|
| 12 |
+
#include "ck/library/utility/check_err.hpp"
|
| 13 |
+
#include "ck/library/utility/device_memory.hpp"
|
| 14 |
+
#include "ck/library/utility/host_tensor.hpp"
|
| 15 |
+
#include "ck/library/utility/host_tensor_generator.hpp"
|
| 16 |
+
#include "ck/library/reference_tensor_operation/cpu/reference_groupnorm.hpp"
|
| 17 |
+
|
| 18 |
+
namespace ck {
|
| 19 |
+
namespace profiler {
|
| 20 |
+
|
| 21 |
+
template <typename XDataType,
|
| 22 |
+
typename GammaDataType,
|
| 23 |
+
typename BetaDataType,
|
| 24 |
+
typename ComputeDataType,
|
| 25 |
+
typename YDataType,
|
| 26 |
+
typename SaveMeanInvStdDataType,
|
| 27 |
+
bool SaveMeanInvStd>
|
| 28 |
+
bool profile_groupnorm_impl(int do_verification,
|
| 29 |
+
int init_method,
|
| 30 |
+
bool do_log,
|
| 31 |
+
bool time_kernel,
|
| 32 |
+
std::vector<index_t> length)
|
| 33 |
+
{
|
| 34 |
+
using PassThrough = ck::tensor_operation::element_wise::PassThrough;
|
| 35 |
+
|
| 36 |
+
if(length.size() != 5)
|
| 37 |
+
return false;
|
| 38 |
+
|
| 39 |
+
index_t N = length[0];
|
| 40 |
+
index_t G = length[3];
|
| 41 |
+
index_t C = length[4];
|
| 42 |
+
|
| 43 |
+
std::vector<index_t> reduce_dim = {1, 2, 4};
|
| 44 |
+
std::vector<index_t> gammaBetaLength = {G, C};
|
| 45 |
+
std::vector<index_t> gammaBetaStride = {0, 0, 0, C, 1};
|
| 46 |
+
|
| 47 |
+
Tensor<XDataType> x(length);
|
| 48 |
+
Tensor<GammaDataType> gamma(gammaBetaLength);
|
| 49 |
+
Tensor<BetaDataType> beta(gammaBetaLength);
|
| 50 |
+
Tensor<YDataType> y(length);
|
| 51 |
+
Tensor<SaveMeanInvStdDataType> save_mean({N, G});
|
| 52 |
+
Tensor<SaveMeanInvStdDataType> save_inv_std({N, G});
|
| 53 |
+
|
| 54 |
+
Tensor<YDataType> host_y(length);
|
| 55 |
+
Tensor<SaveMeanInvStdDataType> host_save_mean({N, G});
|
| 56 |
+
Tensor<SaveMeanInvStdDataType> host_save_inv_std({N, G});
|
| 57 |
+
|
| 58 |
+
std::vector<index_t> strideSaveMeanInvStd = {1};
|
| 59 |
+
|
| 60 |
+
switch(init_method)
|
| 61 |
+
{
|
| 62 |
+
case 0:
|
| 63 |
+
x.GenerateTensorValue(GeneratorTensor_1<XDataType>{});
|
| 64 |
+
gamma.GenerateTensorValue(GeneratorTensor_1<GammaDataType>{});
|
| 65 |
+
beta.GenerateTensorValue(GeneratorTensor_1<BetaDataType>{});
|
| 66 |
+
break;
|
| 67 |
+
case 1:
|
| 68 |
+
x.GenerateTensorValue(GeneratorTensor_2<XDataType>{-5, 5});
|
| 69 |
+
gamma.GenerateTensorValue(GeneratorTensor_2<GammaDataType>{-5, 5});
|
| 70 |
+
beta.GenerateTensorValue(GeneratorTensor_2<BetaDataType>{-5, 5});
|
| 71 |
+
break;
|
| 72 |
+
default:
|
| 73 |
+
x.GenerateTensorValue(GeneratorTensor_3<XDataType>{0, 1});
|
| 74 |
+
gamma.GenerateTensorValue(GeneratorTensor_3<GammaDataType>{-0.5, 0.5});
|
| 75 |
+
beta.GenerateTensorValue(GeneratorTensor_3<BetaDataType>{-0.5, 0.5});
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
DeviceMem x_dev(sizeof(XDataType) * x.mDesc.GetElementSpaceSize());
|
| 79 |
+
DeviceMem gamma_dev(sizeof(GammaDataType) * gamma.mDesc.GetElementSpaceSize());
|
| 80 |
+
DeviceMem beta_dev(sizeof(BetaDataType) * beta.mDesc.GetElementSpaceSize());
|
| 81 |
+
DeviceMem y_dev(sizeof(YDataType) * y.mDesc.GetElementSpaceSize());
|
| 82 |
+
DeviceMem save_mean_dev(sizeof(SaveMeanInvStdDataType) * save_mean.mDesc.GetElementSpaceSize());
|
| 83 |
+
DeviceMem save_inv_std_dev(sizeof(SaveMeanInvStdDataType) *
|
| 84 |
+
save_inv_std.mDesc.GetElementSpaceSize());
|
| 85 |
+
|
| 86 |
+
x_dev.ToDevice(x.mData.data());
|
| 87 |
+
gamma_dev.ToDevice(gamma.mData.data());
|
| 88 |
+
beta_dev.ToDevice(beta.mData.data());
|
| 89 |
+
|
| 90 |
+
// add device normalization instances
|
| 91 |
+
using DeviceOp = ck::tensor_operation::device::DeviceNormalizationFwd<XDataType,
|
| 92 |
+
GammaDataType,
|
| 93 |
+
BetaDataType,
|
| 94 |
+
YDataType,
|
| 95 |
+
SaveMeanInvStdDataType,
|
| 96 |
+
PassThrough,
|
| 97 |
+
5,
|
| 98 |
+
3>;
|
| 99 |
+
|
| 100 |
+
// get device op instances
|
| 101 |
+
const auto instance_ptrs =
|
| 102 |
+
ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<
|
| 103 |
+
DeviceOp>::GetInstances();
|
| 104 |
+
|
| 105 |
+
std::cout << "found " << instance_ptrs.size() << " instances" << std::endl;
|
| 106 |
+
|
| 107 |
+
std::string best_instance_name;
|
| 108 |
+
float best_avg_time = std::numeric_limits<float>::max();
|
| 109 |
+
float best_gb_per_sec = 0;
|
| 110 |
+
|
| 111 |
+
if(do_verification)
|
| 112 |
+
{
|
| 113 |
+
using ReferenceInstance =
|
| 114 |
+
ck::tensor_operation::host::ReferenceGroupnorm<XDataType,
|
| 115 |
+
GammaDataType,
|
| 116 |
+
BetaDataType,
|
| 117 |
+
YDataType,
|
| 118 |
+
SaveMeanInvStdDataType,
|
| 119 |
+
ComputeDataType,
|
| 120 |
+
PassThrough>;
|
| 121 |
+
|
| 122 |
+
ReferenceInstance ref;
|
| 123 |
+
auto ref_argument = ref.MakeArgument(
|
| 124 |
+
x, gamma, beta, host_y, host_save_mean, host_save_inv_std, PassThrough{}, length, 1e-6);
|
| 125 |
+
auto ref_invoker = ref.MakeInvoker();
|
| 126 |
+
ref_invoker.Run(ref_argument);
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
int num_kernel = 0;
|
| 130 |
+
|
| 131 |
+
auto f_get_argument = [&](auto& inst_ptr) {
|
| 132 |
+
if constexpr(SaveMeanInvStd)
|
| 133 |
+
return inst_ptr->MakeArgumentPointer(
|
| 134 |
+
length,
|
| 135 |
+
std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
|
| 136 |
+
gammaBetaStride,
|
| 137 |
+
gammaBetaStride,
|
| 138 |
+
std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
|
| 139 |
+
std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
|
| 140 |
+
save_mean.mDesc.GetStrides().end()},
|
| 141 |
+
std::vector<ck::index_t>{save_inv_std.mDesc.GetStrides().begin(),
|
| 142 |
+
save_inv_std.mDesc.GetStrides().end()},
|
| 143 |
+
reduce_dim,
|
| 144 |
+
1e-6,
|
| 145 |
+
x_dev.GetDeviceBuffer(),
|
| 146 |
+
gamma_dev.GetDeviceBuffer(),
|
| 147 |
+
beta_dev.GetDeviceBuffer(),
|
| 148 |
+
y_dev.GetDeviceBuffer(),
|
| 149 |
+
save_mean_dev.GetDeviceBuffer(),
|
| 150 |
+
save_inv_std_dev.GetDeviceBuffer(),
|
| 151 |
+
PassThrough{});
|
| 152 |
+
else
|
| 153 |
+
return inst_ptr->MakeArgumentPointer(
|
| 154 |
+
length,
|
| 155 |
+
std::vector<ck::index_t>{x.mDesc.GetStrides().begin(), x.mDesc.GetStrides().end()},
|
| 156 |
+
gammaBetaStride,
|
| 157 |
+
gammaBetaStride,
|
| 158 |
+
std::vector<ck::index_t>{y.mDesc.GetStrides().begin(), y.mDesc.GetStrides().end()},
|
| 159 |
+
std::vector<ck::index_t>{save_mean.mDesc.GetStrides().begin(),
|
| 160 |
+
save_mean.mDesc.GetStrides().end()},
|
| 161 |
+
std::vector<ck::index_t>{save_inv_std.mDesc.GetStrides().begin(),
|
| 162 |
+
save_inv_std.mDesc.GetStrides().end()},
|
| 163 |
+
reduce_dim,
|
| 164 |
+
1e-6,
|
| 165 |
+
x_dev.GetDeviceBuffer(),
|
| 166 |
+
gamma_dev.GetDeviceBuffer(),
|
| 167 |
+
beta_dev.GetDeviceBuffer(),
|
| 168 |
+
y_dev.GetDeviceBuffer(),
|
| 169 |
+
nullptr,
|
| 170 |
+
nullptr,
|
| 171 |
+
PassThrough{});
|
| 172 |
+
};
|
| 173 |
+
|
| 174 |
+
for(auto& inst_ptr : instance_ptrs)
|
| 175 |
+
{
|
| 176 |
+
auto argument_ptr = f_get_argument(inst_ptr);
|
| 177 |
+
|
| 178 |
+
if(inst_ptr->IsSupportedArgument(argument_ptr.get()))
|
| 179 |
+
{
|
| 180 |
+
++num_kernel;
|
| 181 |
+
}
|
| 182 |
+
else
|
| 183 |
+
{
|
| 184 |
+
continue;
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
size_t workspace_sz = inst_ptr->GetWorkSpaceSize(argument_ptr.get());
|
| 188 |
+
DeviceMem workspace_dev(workspace_sz);
|
| 189 |
+
inst_ptr->SetWorkSpacePointer(argument_ptr.get(), workspace_dev.GetDeviceBuffer());
|
| 190 |
+
|
| 191 |
+
auto invoker_ptr = inst_ptr->MakeInvokerPointer();
|
| 192 |
+
|
| 193 |
+
float avg_time = invoker_ptr->Run(argument_ptr.get(), StreamConfig{nullptr, time_kernel});
|
| 194 |
+
|
| 195 |
+
std::size_t num_bytes = x.mDesc.GetElementSize() * sizeof(XDataType) +
|
| 196 |
+
gamma.mDesc.GetElementSize() * sizeof(GammaDataType) +
|
| 197 |
+
beta.mDesc.GetElementSize() * sizeof(BetaDataType) +
|
| 198 |
+
y.mDesc.GetElementSize() * sizeof(YDataType);
|
| 199 |
+
|
| 200 |
+
if constexpr(SaveMeanInvStd)
|
| 201 |
+
num_bytes += save_mean.mDesc.GetElementSpaceSize() * sizeof(SaveMeanInvStdDataType) +
|
| 202 |
+
save_inv_std.mDesc.GetElementSpaceSize() * sizeof(SaveMeanInvStdDataType);
|
| 203 |
+
|
| 204 |
+
float gb_per_sec = num_bytes / 1.E6 / avg_time;
|
| 205 |
+
|
| 206 |
+
if(time_kernel)
|
| 207 |
+
std::cout << "Perf: " << std::setw(10) << avg_time << " ms, " << gb_per_sec << " GB/s, "
|
| 208 |
+
<< inst_ptr->GetTypeString() << std::endl;
|
| 209 |
+
|
| 210 |
+
if(avg_time < best_avg_time)
|
| 211 |
+
{
|
| 212 |
+
best_instance_name = inst_ptr->GetTypeString();
|
| 213 |
+
best_avg_time = avg_time;
|
| 214 |
+
best_gb_per_sec = gb_per_sec;
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
if(do_verification)
|
| 218 |
+
{
|
| 219 |
+
y_dev.FromDevice(y.mData.data());
|
| 220 |
+
bool pass = ck::utils::check_err(y, host_y, "Error: Incorrect results", 1e-3, 1e-3);
|
| 221 |
+
|
| 222 |
+
if constexpr(SaveMeanInvStd)
|
| 223 |
+
{
|
| 224 |
+
save_mean_dev.FromDevice(save_mean.mData.data());
|
| 225 |
+
pass &= ck::utils::check_err(
|
| 226 |
+
save_mean.mData, host_save_mean.mData, "Error: Incorrect results", 1e-3, 1e-3);
|
| 227 |
+
|
| 228 |
+
save_inv_std_dev.FromDevice(save_inv_std.mData.data());
|
| 229 |
+
pass &= ck::utils::check_err(save_inv_std.mData,
|
| 230 |
+
host_save_inv_std.mData,
|
| 231 |
+
"Error: Incorrect results",
|
| 232 |
+
1e-3,
|
| 233 |
+
1e-3);
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
if(do_log)
|
| 237 |
+
{
|
| 238 |
+
LogRangeAsType<float>(std::cout << "x : ", x.mData, ",") << std::endl;
|
| 239 |
+
LogRangeAsType<float>(std::cout << "host_y : ", host_y.mData, ",") << std::endl;
|
| 240 |
+
LogRangeAsType<float>(std::cout << "y : ", y.mData, ",") << std::endl;
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
if(!pass)
|
| 244 |
+
{
|
| 245 |
+
std::cout << inst_ptr->GetTypeString() << " failed verification: ";
|
| 246 |
+
LogRange(std::cout << "lengths = [", length, ", ") << "]." << std::endl;
|
| 247 |
+
return false;
|
| 248 |
+
}
|
| 249 |
+
else
|
| 250 |
+
{
|
| 251 |
+
if(time_kernel)
|
| 252 |
+
std::cout << "pass" << std::endl;
|
| 253 |
+
}
|
| 254 |
+
}
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
if(time_kernel)
|
| 258 |
+
{
|
| 259 |
+
LogRange(std::cout << "length = ", length, ",") << std::endl;
|
| 260 |
+
std::cout << "best perf = " << best_avg_time << " ms, " << best_gb_per_sec << " GB/s, "
|
| 261 |
+
<< best_instance_name << std::endl;
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
if(num_kernel == 0)
|
| 265 |
+
{
|
| 266 |
+
std::cout << "Error: No kernel is applicable" << std::endl;
|
| 267 |
+
return false;
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
return true;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
} // namespace profiler
|
| 274 |
+
} // namespace ck
|